VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h@ 39973

Last change on this file since 39973 was 39971, checked in by vboxsync, 13 years ago

IEM: VERR_NOT_IMPLEMENTED -> VERR_IEM_INSTR_NOT_IMPLEMENTED or VERR_IEM_ASPECT_NOT_IMPLEMENTED.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 398.4 KB
Line 
1/* $Id: IEMAllInstructions.cpp.h 39971 2012-02-02 21:35:05Z vboxsync $ */
2/** @file
3 * IEM - Instruction Decoding and Emulation.
4 */
5
6/*
7 * Copyright (C) 2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Global Variables *
21*******************************************************************************/
22extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
23
24
/**
 * Common worker for instructions like ADD, AND, OR, ++ with a byte
 * memory/register as the destination.
 *
 * Decodes the ModR/M byte itself.  For a register destination the operation
 * is applied directly to the guest register; for a memory destination the
 * location is mapped, operated on, and then committed together with EFLAGS.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_r8, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* LOCK prefix is not allowed with a register destination. */
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         * Note! We're putting the eflags on the stack here so we can commit them
         *       after the memory.
         */
        /* If there is no locked variant the instruction only reads the destination
           (CMP/TEST), so map it read-only.  NOTE(review): relies on the table
           invariant that pfnLockedU8 == NULL exactly for the read-only ops. */
        uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R; /* CMP,TEST */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,  pu8Dst,           0);
        IEM_MC_ARG(uint8_t,    u8Src,            1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_EFLAGS(EFlags);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
85
86
87/**
88 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with
89 * memory/register as the destination.
90 *
91 * @param pImpl Pointer to the instruction implementation (assembly).
92 */
93FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl)
94{
95 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
96
97 /*
98 * If rm is denoting a register, no more instruction bytes.
99 */
100 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
101 {
102 IEMOP_HLP_NO_LOCK_PREFIX();
103
104 switch (pIemCpu->enmEffOpSize)
105 {
106 case IEMMODE_16BIT:
107 IEM_MC_BEGIN(3, 0);
108 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
109 IEM_MC_ARG(uint16_t, u16Src, 1);
110 IEM_MC_ARG(uint32_t *, pEFlags, 2);
111
112 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
113 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
114 IEM_MC_REF_EFLAGS(pEFlags);
115 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
116
117 IEM_MC_ADVANCE_RIP();
118 IEM_MC_END();
119 break;
120
121 case IEMMODE_32BIT:
122 IEM_MC_BEGIN(3, 0);
123 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
124 IEM_MC_ARG(uint32_t, u32Src, 1);
125 IEM_MC_ARG(uint32_t *, pEFlags, 2);
126
127 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
128 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
129 IEM_MC_REF_EFLAGS(pEFlags);
130 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
131
132 IEM_MC_ADVANCE_RIP();
133 IEM_MC_END();
134 break;
135
136 case IEMMODE_64BIT:
137 IEM_MC_BEGIN(3, 0);
138 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
139 IEM_MC_ARG(uint64_t, u64Src, 1);
140 IEM_MC_ARG(uint32_t *, pEFlags, 2);
141
142 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
143 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
144 IEM_MC_REF_EFLAGS(pEFlags);
145 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
146
147 IEM_MC_ADVANCE_RIP();
148 IEM_MC_END();
149 break;
150 }
151 }
152 else
153 {
154 /*
155 * We're accessing memory.
156 * Note! We're putting the eflags on the stack here so we can commit them
157 * after the memory.
158 */
159 uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R /* CMP,TEST */;
160 switch (pIemCpu->enmEffOpSize)
161 {
162 case IEMMODE_16BIT:
163 IEM_MC_BEGIN(3, 2);
164 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
165 IEM_MC_ARG(uint16_t, u16Src, 1);
166 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
167 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
168
169 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
170 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
171 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
172 IEM_MC_FETCH_EFLAGS(EFlags);
173 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
174 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
175 else
176 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
177
178 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
179 IEM_MC_COMMIT_EFLAGS(EFlags);
180 IEM_MC_ADVANCE_RIP();
181 IEM_MC_END();
182 break;
183
184 case IEMMODE_32BIT:
185 IEM_MC_BEGIN(3, 2);
186 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
187 IEM_MC_ARG(uint32_t, u32Src, 1);
188 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
189 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
190
191 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
192 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
193 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
194 IEM_MC_FETCH_EFLAGS(EFlags);
195 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
196 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
197 else
198 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
199
200 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
201 IEM_MC_COMMIT_EFLAGS(EFlags);
202 IEM_MC_ADVANCE_RIP();
203 IEM_MC_END();
204 break;
205
206 case IEMMODE_64BIT:
207 IEM_MC_BEGIN(3, 2);
208 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
209 IEM_MC_ARG(uint64_t, u64Src, 1);
210 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
211 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
212
213 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
214 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
215 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
216 IEM_MC_FETCH_EFLAGS(EFlags);
217 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
218 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
219 else
220 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
221
222 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
223 IEM_MC_COMMIT_EFLAGS(EFlags);
224 IEM_MC_ADVANCE_RIP();
225 IEM_MC_END();
226 break;
227 }
228 }
229 return VINF_SUCCESS;
230}
231
232
/**
 * Common worker for byte instructions like ADD, AND, OR, ++ with a register as
 * the destination.
 *
 * Decodes the ModR/M byte itself.  Since the destination is always a
 * register, the LOCK prefix is rejected and the memory operand (if any) is
 * only read, so no mapping/commit dance is needed.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_r8_rm, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_FETCH_GREG_U8(u8Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         */
        IEM_MC_BEGIN(3, 1);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_MEM_U8(u8Src, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
284
285
286/**
287 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with a
288 * register as the destination.
289 *
290 * @param pImpl Pointer to the instruction implementation (assembly).
291 */
292FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl)
293{
294 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
295 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
296
297 /*
298 * If rm is denoting a register, no more instruction bytes.
299 */
300 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
301 {
302 switch (pIemCpu->enmEffOpSize)
303 {
304 case IEMMODE_16BIT:
305 IEM_MC_BEGIN(3, 0);
306 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
307 IEM_MC_ARG(uint16_t, u16Src, 1);
308 IEM_MC_ARG(uint32_t *, pEFlags, 2);
309
310 IEM_MC_FETCH_GREG_U16(u16Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
311 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
312 IEM_MC_REF_EFLAGS(pEFlags);
313 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
314
315 IEM_MC_ADVANCE_RIP();
316 IEM_MC_END();
317 break;
318
319 case IEMMODE_32BIT:
320 IEM_MC_BEGIN(3, 0);
321 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
322 IEM_MC_ARG(uint32_t, u32Src, 1);
323 IEM_MC_ARG(uint32_t *, pEFlags, 2);
324
325 IEM_MC_FETCH_GREG_U32(u32Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
326 IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
327 IEM_MC_REF_EFLAGS(pEFlags);
328 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
329
330 IEM_MC_ADVANCE_RIP();
331 IEM_MC_END();
332 break;
333
334 case IEMMODE_64BIT:
335 IEM_MC_BEGIN(3, 0);
336 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
337 IEM_MC_ARG(uint64_t, u64Src, 1);
338 IEM_MC_ARG(uint32_t *, pEFlags, 2);
339
340 IEM_MC_FETCH_GREG_U64(u64Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
341 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
342 IEM_MC_REF_EFLAGS(pEFlags);
343 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
344
345 IEM_MC_ADVANCE_RIP();
346 IEM_MC_END();
347 break;
348 }
349 }
350 else
351 {
352 /*
353 * We're accessing memory.
354 */
355 switch (pIemCpu->enmEffOpSize)
356 {
357 case IEMMODE_16BIT:
358 IEM_MC_BEGIN(3, 1);
359 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
360 IEM_MC_ARG(uint16_t, u16Src, 1);
361 IEM_MC_ARG(uint32_t *, pEFlags, 2);
362 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
363
364 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
365 IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffDst);
366 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
367 IEM_MC_REF_EFLAGS(pEFlags);
368 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
369
370 IEM_MC_ADVANCE_RIP();
371 IEM_MC_END();
372 break;
373
374 case IEMMODE_32BIT:
375 IEM_MC_BEGIN(3, 1);
376 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
377 IEM_MC_ARG(uint32_t, u32Src, 1);
378 IEM_MC_ARG(uint32_t *, pEFlags, 2);
379 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
380
381 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
382 IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffDst);
383 IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
384 IEM_MC_REF_EFLAGS(pEFlags);
385 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
386
387 IEM_MC_ADVANCE_RIP();
388 IEM_MC_END();
389 break;
390
391 case IEMMODE_64BIT:
392 IEM_MC_BEGIN(3, 1);
393 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
394 IEM_MC_ARG(uint64_t, u64Src, 1);
395 IEM_MC_ARG(uint32_t *, pEFlags, 2);
396 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
397
398 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
399 IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffDst);
400 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
401 IEM_MC_REF_EFLAGS(pEFlags);
402 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
403
404 IEM_MC_ADVANCE_RIP();
405 IEM_MC_END();
406 break;
407 }
408 }
409 return VINF_SUCCESS;
410}
411
412
/**
 * Common worker for instructions like ADD, AND, OR, ++ with working on AL with
 * a byte immediate.
 *
 * Fetches the immediate itself; no ModR/M byte is involved.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_AL_Ib, PCIEMOPBINSIZES, pImpl)
{
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG(uint8_t *,       pu8Dst,             0);
    IEM_MC_ARG_CONST(uint8_t,   u8Src,/*=*/ u8Imm,  1);
    IEM_MC_ARG(uint32_t *,      pEFlags,            2);

    IEM_MC_REF_GREG_U8(pu8Dst, X86_GREG_xAX); /* AL is the low byte of rAX. */
    IEM_MC_REF_EFLAGS(pEFlags);
    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
437
438
/**
 * Common worker for instructions like ADD, AND, OR, ++ with working on
 * AX/EAX/RAX with a word/dword immediate.
 *
 * Fetches the immediate itself (16 or 32 bits depending on operand size;
 * the 64-bit form takes a sign-extended 32-bit immediate).
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rAX_Iz, PCIEMOPBINSIZES, pImpl)
{
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint16_t *,      pu16Dst,                0);
            IEM_MC_ARG_CONST(uint16_t,  u16Src,/*=*/ u16Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint32_t *,      pu32Dst,                0);
            IEM_MC_ARG_CONST(uint32_t,  u32Src,/*=*/ u32Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            /* No 64-bit immediate form: the 32-bit immediate is sign-extended. */
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint64_t *,      pu64Dst,                0);
            IEM_MC_ARG_CONST(uint64_t,  u64Src,/*=*/ u64Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U64(pu64Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
509
510
/** Opcodes 0xf1, 0xd6.
 * Shared handler for invalid opcodes: raises \#UD. */
FNIEMOP_DEF(iemOp_Invalid)
{
    IEMOP_MNEMONIC("Invalid");
    return IEMOP_RAISE_INVALID_OPCODE();
}
517
518
519
520/** @name ..... opcodes.
521 *
522 * @{
523 */
524
525/** @} */
526
527
528/** @name Two byte opcodes (first byte 0x0f).
529 *
530 * @{
531 */
532
/** Opcode 0x0f 0x00 /0. SLDT - not implemented yet (stub asserts). */
FNIEMOP_STUB_1(iemOp_Grp6_sldt, uint8_t, bRm);


/** Opcode 0x0f 0x00 /1. STR - not implemented yet (stub asserts). */
FNIEMOP_STUB_1(iemOp_Grp6_str, uint8_t, bRm);
540
/** Opcode 0x0f 0x00 /2. LLDT - load local descriptor table register.
 * Fetches the selector from a register or memory and defers the actual work
 * (including privilege checks) to iemCImpl_lldt.
 * NOTE(review): only the memory path does IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO
 * up front; presumably iemCImpl_lldt repeats the CPL check - confirm. */
FNIEMOP_DEF_1(iemOp_Grp6_lldt, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); /* #GP before any memory access. */
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
        IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
566
567
/** Opcode 0x0f 0x00 /3. LTR - load task register.
 * Fetches the selector from a register or memory and defers the actual work
 * (including privilege checks) to iemCImpl_ltr.  Mirrors iemOp_Grp6_lldt. */
FNIEMOP_DEF_1(iemOp_Grp6_ltr, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); /* #GP before any memory access. */
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
        IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
593
594
/** Opcode 0x0f 0x00 /4. VERR - not implemented yet (stub asserts). */
FNIEMOP_STUB_1(iemOp_Grp6_verr, uint8_t, bRm);


/** Opcode 0x0f 0x00 /5. VERW - not implemented yet (stub asserts). */
FNIEMOP_STUB_1(iemOp_Grp6_verw, uint8_t, bRm);
601
602
603/** Opcode 0x0f 0x00. */
604FNIEMOP_DEF(iemOp_Grp6)
605{
606 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
607 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
608 {
609 case 0: return FNIEMOP_CALL_1(iemOp_Grp6_sldt, bRm);
610 case 1: return FNIEMOP_CALL_1(iemOp_Grp6_str, bRm);
611 case 2: return FNIEMOP_CALL_1(iemOp_Grp6_lldt, bRm);
612 case 3: return FNIEMOP_CALL_1(iemOp_Grp6_ltr, bRm);
613 case 4: return FNIEMOP_CALL_1(iemOp_Grp6_verr, bRm);
614 case 5: return FNIEMOP_CALL_1(iemOp_Grp6_verw, bRm);
615 case 6: return IEMOP_RAISE_INVALID_OPCODE();
616 case 7: return IEMOP_RAISE_INVALID_OPCODE();
617 IEM_NOT_REACHED_DEFAULT_CASE_RET();
618 }
619
620}
621
622
/** Opcode 0x0f 0x01 /0. SGDT - not implemented yet. */
FNIEMOP_DEF_1(iemOp_Grp7_sgdt, uint8_t, bRm)
{
    NOREF(pIemCpu); NOREF(bRm);
    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
}
629
630
/** Opcode 0x0f 0x01 /0 (mod=3, rm=1). VMCALL - VMX not supported by IEM;
 * asserts in debug builds and raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_vmcall)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /0 (mod=3, rm=2). VMLAUNCH - VMX not supported by IEM;
 * asserts in debug builds and raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_vmlaunch)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /0 (mod=3, rm=3). VMRESUME - VMX not supported by IEM;
 * asserts in debug builds and raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_vmresume)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /0 (mod=3, rm=4). VMXOFF - VMX not supported by IEM;
 * asserts in debug builds and raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_vmxoff)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}
661
662
/** Opcode 0x0f 0x01 /1. SIDT - not implemented yet. */
FNIEMOP_DEF_1(iemOp_Grp7_sidt, uint8_t, bRm)
{
    NOREF(pIemCpu); NOREF(bRm);
    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /1 (mod=3, rm=0). MONITOR - not implemented yet. */
FNIEMOP_DEF(iemOp_Grp7_monitor)
{
    NOREF(pIemCpu);
    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /1 (mod=3, rm=1). MWAIT - not implemented yet. */
FNIEMOP_DEF(iemOp_Grp7_mwait)
{
    NOREF(pIemCpu);
    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
}
685
686
/** Opcode 0x0f 0x01 /2. LGDT - load global descriptor table register.
 * Computes the effective address and defers the descriptor load (and
 * privilege checking) to iemCImpl_lgdt. */
FNIEMOP_DEF_1(iemOp_Grp7_lgdt, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /* In 64-bit mode the operand size is fixed at 64-bit regardless of prefixes. */
    IEMMODE enmEffOpSize = pIemCpu->enmCpuMode == IEMMODE_64BIT
                         ? IEMMODE_64BIT
                         : pIemCpu->enmEffOpSize;
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0);
    IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/enmEffOpSize, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
    IEM_MC_CALL_CIMPL_3(iemCImpl_lgdt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}
704
705
/** Opcode 0x0f 0x01 /2 (mod=3, rm=0). XGETBV - not supported by IEM;
 * asserts in debug builds and raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_xgetbv)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /2 (mod=3, rm=1). XSETBV - not supported by IEM;
 * asserts in debug builds and raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_xsetbv)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}
720
721
/** Opcode 0x0f 0x01 /3. LIDT - load interrupt descriptor table register.
 * Mirrors iemOp_Grp7_lgdt; the descriptor load (and privilege checking) is
 * deferred to iemCImpl_lidt. */
FNIEMOP_DEF_1(iemOp_Grp7_lidt, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /* In 64-bit mode the operand size is fixed at 64-bit regardless of prefixes. */
    IEMMODE enmEffOpSize = pIemCpu->enmCpuMode == IEMMODE_64BIT
                         ? IEMMODE_64BIT
                         : pIemCpu->enmEffOpSize;
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0);
    IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/enmEffOpSize, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
    IEM_MC_CALL_CIMPL_3(iemCImpl_lidt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}
739
740
/** Opcode 0x0f 0x01 /4. SMSW - store machine status word (low part of CR0).
 * Register destinations honor the operand size; memory destinations are
 * always 16-bit. */
FNIEMOP_DEF_1(iemOp_Grp7_smsw, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Tmp);
                IEM_MC_FETCH_CR0_U16(u16Tmp);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Tmp);
                IEM_MC_FETCH_CR0_U32(u32Tmp);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_FETCH_CR0_U64(u64Tmp);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Ignore operand size here, memory refs are always 16-bit. */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Tmp);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_CR0_U16(u16Tmp);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tmp);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
        return VINF_SUCCESS;
    }
}
793
794
/** Opcode 0x0f 0x01 /6. LMSW - load machine status word.
 * Fetches a 16-bit value from a register or memory and defers the CR0 update
 * (and privilege checking) to iemCImpl_lmsw. */
FNIEMOP_DEF_1(iemOp_Grp7_lmsw, uint8_t, bRm)
{
    /* The operand size is effectively ignored, all is 16-bit and only the
       lower 3-bits are used. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Tmp, 0);
        IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Tmp, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
821
822
/** Opcode 0x0f 0x01 /7. INVLPG - invalidate TLB entry for the effective
 * address; the actual invalidation is done by iemCImpl_invlpg. */
FNIEMOP_DEF_1(iemOp_Grp7_invlpg, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(1, 1);
    IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 0);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
    IEM_MC_CALL_CIMPL_1(iemCImpl_invlpg, GCPtrEffDst);
    IEM_MC_END();
    return VINF_SUCCESS;
}
834
835
/** Opcode 0x0f 0x01 /7 (mod=3, rm=0). SWAPGS - not implemented yet. */
FNIEMOP_DEF(iemOp_Grp7_swapgs)
{
    NOREF(pIemCpu);
    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /7 (mod=3, rm=1). RDTSCP - not implemented yet. */
FNIEMOP_DEF(iemOp_Grp7_rdtscp)
{
    NOREF(pIemCpu);
    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
}
850
851
/** Opcode 0x0f 0x01.
 * Group 7: dispatch on the ModR/M reg field; for several /r values the
 * register form (mod=3) further dispatches on the rm field to special
 * instructions (VMX, MONITOR/MWAIT, XGETBV/XSETBV, SWAPGS/RDTSCP). */
FNIEMOP_DEF(iemOp_Grp7)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            /* Memory form: SGDT.  Register form: VMX instructions by rm. */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_sgdt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 1: return FNIEMOP_CALL(iemOp_Grp7_vmcall);
                case 2: return FNIEMOP_CALL(iemOp_Grp7_vmlaunch);
                case 3: return FNIEMOP_CALL(iemOp_Grp7_vmresume);
                case 4: return FNIEMOP_CALL(iemOp_Grp7_vmxoff);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 1:
            /* Memory form: SIDT.  Register form: MONITOR/MWAIT by rm. */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_sidt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_monitor);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_mwait);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 2:
            /* Memory form: LGDT.  Register form: XGETBV/XSETBV by rm. */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_lgdt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_xgetbv);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_xsetbv);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 3:
            /* Memory form only: LIDT. */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_lidt, bRm);
            return IEMOP_RAISE_INVALID_OPCODE();

        case 4:
            return FNIEMOP_CALL_1(iemOp_Grp7_smsw, bRm);

        case 5:
            return IEMOP_RAISE_INVALID_OPCODE();

        case 6:
            return FNIEMOP_CALL_1(iemOp_Grp7_lmsw, bRm);

        case 7:
            /* Memory form: INVLPG.  Register form: SWAPGS/RDTSCP by rm. */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_invlpg, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_swapgs);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_rdtscp);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
917
918
/** Opcode 0x0f 0x02. LAR - not implemented yet (stub asserts). */
FNIEMOP_STUB(iemOp_lar_Gv_Ew);
/** Opcode 0x0f 0x03. LSL - not implemented yet (stub asserts). */
FNIEMOP_STUB(iemOp_lsl_Gv_Ew);
/** Opcode 0x0f 0x04. SYSCALL - not implemented yet (stub asserts). */
FNIEMOP_STUB(iemOp_syscall);
925
926
/** Opcode 0x0f 0x05. CLTS - clear the task-switched flag in CR0; the whole
 * job is deferred to iemCImpl_clts. */
FNIEMOP_DEF(iemOp_clts)
{
    IEMOP_MNEMONIC("clts");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clts);
}
934
935
/* Not-yet-implemented opcodes; each FNIEMOP_STUB asserts when hit. */
/** Opcode 0x0f 0x06. */
FNIEMOP_STUB(iemOp_sysret);
/** Opcode 0x0f 0x08. */
FNIEMOP_STUB(iemOp_invd);
/** Opcode 0x0f 0x09. */
FNIEMOP_STUB(iemOp_wbinvd);
/** Opcode 0x0f 0x0b. */
FNIEMOP_STUB(iemOp_ud2);
/** Opcode 0x0f 0x0d. */
FNIEMOP_STUB(iemOp_nop_Ev_prefetch);
/** Opcode 0x0f 0x0e. */
FNIEMOP_STUB(iemOp_femms);
/** Opcode 0x0f 0x0f. */
FNIEMOP_STUB(iemOp_3Dnow);
/** Opcode 0x0f 0x10. */
FNIEMOP_STUB(iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd);
/** Opcode 0x0f 0x11. */
FNIEMOP_STUB(iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd);
/** Opcode 0x0f 0x12. */
FNIEMOP_STUB(iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq);
/** Opcode 0x0f 0x13. */
FNIEMOP_STUB(iemOp_movlps_Mq_Vq__movlpd_Mq_Vq);
/** Opcode 0x0f 0x14. */
FNIEMOP_STUB(iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq);
/** Opcode 0x0f 0x15. */
FNIEMOP_STUB(iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq);
/** Opcode 0x0f 0x16. */
FNIEMOP_STUB(iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq);
/** Opcode 0x0f 0x17. */
FNIEMOP_STUB(iemOp_movhps_Mq_Vq__movhpd_Mq_Vq);
/** Opcode 0x0f 0x18. */
FNIEMOP_STUB(iemOp_prefetch_Grp16);
968
969
/** Opcode 0x0f 0x20. MOV Rd,Cd - read a control register into a GPR.
 * The actual move (and privilege checking) is done by iemCImpl_mov_Rd_Cd. */
FNIEMOP_DEF(iemOp_mov_Rd_Cd)
{
    /* mod is ignored, as is operand size overrides. */
    IEMOP_MNEMONIC("mov Rd,Cd");
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
    else
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;

    /** @todo Verify that the invalid lock sequence exception (\#UD) is raised
     *        before the privilege level violation (\#GP). */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
    {
        /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
        if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
            return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        iCrReg |= 8;
    }
    /* Only CR0, CR2, CR3, CR4 and CR8 exist; anything else is #UD. */
    switch (iCrReg)
    {
        case 0: case 2: case 3: case 4: case 8:
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }

    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Cd, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB, iCrReg);
}
1001
1002
/** Opcode 0x0f 0x21. MOV Rd,Dd - read a debug register into a GPR.
 * REX.R is invalid here (there is no DR8+); the move itself is done by
 * iemCImpl_mov_Rd_Dd. */
FNIEMOP_DEF(iemOp_mov_Rd_Dd)
{
    IEMOP_MNEMONIC("mov Rd,Dd");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
        return IEMOP_RAISE_INVALID_OPCODE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Dd,
                                   (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB,
                                   ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK));
}
1015
1016
/** Opcode 0x0f 0x22. MOV Cd,Rd - write a GPR into a control register.
 * The actual move (and privilege checking) is done by iemCImpl_mov_Cd_Rd.
 * Mirrors iemOp_mov_Rd_Cd. */
FNIEMOP_DEF(iemOp_mov_Cd_Rd)
{
    /* mod is ignored, as is operand size overrides. */
    IEMOP_MNEMONIC("mov Cd,Rd");
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
    else
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;

    /** @todo Verify that the invalid lock sequence exception (\#UD) is raised
     *        before the privilege level violation (\#GP). */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
    {
        /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
        if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
            return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        iCrReg |= 8;
    }
    /* Only CR0, CR2, CR3, CR4 and CR8 exist; anything else is #UD. */
    switch (iCrReg)
    {
        case 0: case 2: case 3: case 4: case 8:
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }

    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Cd_Rd, iCrReg, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}
1048
1049
/** Opcode 0x0f 0x23 - mov Dd,Rd: load a debug register from a GPR. */
FNIEMOP_DEF(iemOp_mov_Dd_Rd)
{
    IEMOP_MNEMONIC("mov Dd,Rd");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* REX.R on this opcode is invalid - there are no DR8..DR15 to select. */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
        return IEMOP_RAISE_INVALID_OPCODE();
    /* Privilege and register checks are done by the C implementation. */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Dd_Rd,
                                   ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK),
                                   (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}
1062
1063
/** Opcode 0x0f 0x24 - mov Rd,Td: test registers no longer exist, raise \#UD. */
FNIEMOP_DEF(iemOp_mov_Rd_Td)
{
    IEMOP_MNEMONIC("mov Rd,Td");
/** @todo Is the invalid opcode raise before parsing any R/M byte? */
    return IEMOP_RAISE_INVALID_OPCODE();
}



/** Opcode 0x0f 0x26 - mov Td,Rd: test registers no longer exist, raise \#UD. */
FNIEMOP_DEF(iemOp_mov_Td_Rd)
{
    IEMOP_MNEMONIC("mov Td,Rd");
    return IEMOP_RAISE_INVALID_OPCODE();
}
1080
1081
/*
 * Opcodes 0x0f 0x28 thru 0x30: SSE/SSE2 move, convert and compare forms plus
 * wrmsr.  All still decoder stubs (not implemented yet).
 */
/** Opcode 0x0f 0x28. */
FNIEMOP_STUB(iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd);
/** Opcode 0x0f 0x29. */
FNIEMOP_STUB(iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd);
/** Opcode 0x0f 0x2a. */
FNIEMOP_STUB(iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey);
/** Opcode 0x0f 0x2b. */
FNIEMOP_STUB(iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd);
/** Opcode 0x0f 0x2c. */
FNIEMOP_STUB(iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd);
/** Opcode 0x0f 0x2d. */
FNIEMOP_STUB(iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd);
/** Opcode 0x0f 0x2e. */
FNIEMOP_STUB(iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd);
/** Opcode 0x0f 0x2f. */
FNIEMOP_STUB(iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd);
/** Opcode 0x0f 0x30. */
FNIEMOP_STUB(iemOp_wrmsr);
1100
1101
/** Opcode 0x0f 0x31 - rdtsc: read time-stamp counter into EDX:EAX. */
FNIEMOP_DEF(iemOp_rdtsc)
{
    IEMOP_MNEMONIC("rdtsc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* CR4.TSD/CPL checks and the actual TSC read live in the C implementation. */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdtsc);
}
1109
1110
/** Opcode 0x0f 0x32. */
FNIEMOP_STUB(iemOp_rdmsr);
/** Opcode 0x0f 0x33. */
FNIEMOP_STUB(iemOp_rdpmc);
/** Opcode 0x0f 0x34. */
FNIEMOP_STUB(iemOp_sysenter);
/** Opcode 0x0f 0x35. */
FNIEMOP_STUB(iemOp_sysexit);
/** Opcode 0x0f 0x37. */
FNIEMOP_STUB(iemOp_getsec);
/** Opcode 0x0f 0x38. */
FNIEMOP_STUB(iemOp_3byte_Esc_A4);
/** Opcode 0x0f 0x39. */
FNIEMOP_STUB(iemOp_3byte_Esc_A5);
/** Opcode 0x0f 0x3c (?). */
FNIEMOP_STUB(iemOp_movnti_Gv_Ev);
1127
/**
 * Implements a conditional move (CMOVcc Gv,Ev).
 *
 * Wish there was an obvious way to do this where we could share and reduce
 * code bloat.
 *
 * Expansion notes:
 *  - Register form: the source GPR is only read inside the condition.
 *  - Memory form: the effective address is calculated and the source is
 *    fetched BEFORE the condition is evaluated, so a memory fault can occur
 *    even when the condition is false (matches real hardware, which always
 *    accesses the source operand).
 *  - 32-bit operand: the destination is written on both paths; the false
 *    path clears the upper half of the 64-bit register, since 32-bit GPR
 *    writes zero-extend in long mode.
 *  - 16-bit and 64-bit operands: the destination is untouched when the
 *    condition is false (no IEM_MC_ELSE branch).
 *
 * @param a_Cnd The conditional "microcode" operation.
 */
#define CMOV_X(a_Cnd) \
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); \
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) \
    { \
        switch (pIemCpu->enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint16_t, u16Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint32_t, u32Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
                } IEM_MC_ELSE() { \
                    IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint64_t, u64Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } \
    else \
    { \
        switch (pIemCpu->enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint16_t, u16Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
                IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint32_t, u32Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
                IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
                } IEM_MC_ELSE() { \
                    IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint64_t, u64Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } do {} while (0)
1228
1229
1230
/** Opcode 0x0f 0x40 - cmovo: move if OF=1. */
FNIEMOP_DEF(iemOp_cmovo_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovo Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF));
}


/** Opcode 0x0f 0x41 - cmovno: move if OF=0. */
FNIEMOP_DEF(iemOp_cmovno_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovno Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_OF));
}


/** Opcode 0x0f 0x42 - cmovc/cmovb: move if CF=1. */
FNIEMOP_DEF(iemOp_cmovc_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovc Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF));
}


/** Opcode 0x0f 0x43 - cmovnc/cmovae: move if CF=0. */
FNIEMOP_DEF(iemOp_cmovnc_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnc Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_CF));
}


/** Opcode 0x0f 0x44 - cmove/cmovz: move if ZF=1. */
FNIEMOP_DEF(iemOp_cmove_Gv_Ev)
{
    IEMOP_MNEMONIC("cmove Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF));
}


/** Opcode 0x0f 0x45 - cmovne/cmovnz: move if ZF=0. */
FNIEMOP_DEF(iemOp_cmovne_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovne Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF));
}


/** Opcode 0x0f 0x46 - cmovbe: move if CF=1 or ZF=1. */
FNIEMOP_DEF(iemOp_cmovbe_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovbe Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF));
}


/** Opcode 0x0f 0x47 - cmovnbe/cmova: move if CF=0 and ZF=0. */
FNIEMOP_DEF(iemOp_cmovnbe_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnbe Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_NO_BITS_SET(X86_EFL_CF | X86_EFL_ZF));
}


/** Opcode 0x0f 0x48 - cmovs: move if SF=1. */
FNIEMOP_DEF(iemOp_cmovs_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovs Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF));
}


/** Opcode 0x0f 0x49 - cmovns: move if SF=0. */
FNIEMOP_DEF(iemOp_cmovns_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovns Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_SF));
}


/** Opcode 0x0f 0x4a - cmovp: move if PF=1. */
FNIEMOP_DEF(iemOp_cmovp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovp Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF));
}


/** Opcode 0x0f 0x4b - cmovnp: move if PF=0. */
FNIEMOP_DEF(iemOp_cmovnp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnp Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_PF));
}


/** Opcode 0x0f 0x4c - cmovl: move if SF != OF. */
FNIEMOP_DEF(iemOp_cmovl_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovl Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4d - cmovnl/cmovge: move if SF == OF. */
FNIEMOP_DEF(iemOp_cmovnl_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnl Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BITS_EQ(X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4e - cmovle: move if ZF=1 or SF != OF. */
FNIEMOP_DEF(iemOp_cmovle_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovle Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4f - cmovnle/cmovg: move if ZF=0 and SF == OF. */
FNIEMOP_DEF(iemOp_cmovnle_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnle Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF));
}

#undef CMOV_X
1359
/*
 * Opcodes 0x0f 0x50 thru 0x7f: the SSE/SSE2 arithmetic block and the MMX/SSE2
 * pack/unpack/compare block.  All still decoder stubs (not implemented yet).
 */
/** Opcode 0x0f 0x50. */
FNIEMOP_STUB(iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd);
/** Opcode 0x0f 0x51. */
FNIEMOP_STUB(iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd);
/** Opcode 0x0f 0x52. */
FNIEMOP_STUB(iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss);
/** Opcode 0x0f 0x53. */
FNIEMOP_STUB(iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss);
/** Opcode 0x0f 0x54. */
FNIEMOP_STUB(iemOp_andps_Vps_Wps__andpd_Wpd_Vpd);
/** Opcode 0x0f 0x55. */
FNIEMOP_STUB(iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd);
/** Opcode 0x0f 0x56. */
FNIEMOP_STUB(iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd);
/** Opcode 0x0f 0x57. */
FNIEMOP_STUB(iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd);
/** Opcode 0x0f 0x58. */
FNIEMOP_STUB(iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd);
/** Opcode 0x0f 0x59. */
FNIEMOP_STUB(iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd);
/** Opcode 0x0f 0x5a. */
FNIEMOP_STUB(iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd);
/** Opcode 0x0f 0x5b. */
FNIEMOP_STUB(iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps);
/** Opcode 0x0f 0x5c. */
FNIEMOP_STUB(iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd);
/** Opcode 0x0f 0x5d. */
FNIEMOP_STUB(iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd);
/** Opcode 0x0f 0x5e. */
FNIEMOP_STUB(iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd);
/** Opcode 0x0f 0x5f. */
FNIEMOP_STUB(iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd);
/** Opcode 0x0f 0x60. */
FNIEMOP_STUB(iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq);
/** Opcode 0x0f 0x61. */
FNIEMOP_STUB(iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq);
/** Opcode 0x0f 0x62. */
FNIEMOP_STUB(iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq);
/** Opcode 0x0f 0x63. */
FNIEMOP_STUB(iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq);
/** Opcode 0x0f 0x64. */
FNIEMOP_STUB(iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq);
/** Opcode 0x0f 0x65. */
FNIEMOP_STUB(iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq);
/** Opcode 0x0f 0x66. */
FNIEMOP_STUB(iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq);
/** Opcode 0x0f 0x67. */
FNIEMOP_STUB(iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq);
/** Opcode 0x0f 0x68. */
FNIEMOP_STUB(iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq);
/** Opcode 0x0f 0x69. */
FNIEMOP_STUB(iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq);
/** Opcode 0x0f 0x6a. */
FNIEMOP_STUB(iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq);
/** Opcode 0x0f 0x6b. */
FNIEMOP_STUB(iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq);
/** Opcode 0x0f 0x6c. */
FNIEMOP_STUB(iemOp_punpcklqdq_Vdq_Wdq);
/** Opcode 0x0f 0x6d. */
FNIEMOP_STUB(iemOp_punpckhqdq_Vdq_Wdq);
/** Opcode 0x0f 0x6e. */
FNIEMOP_STUB(iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey);
/** Opcode 0x0f 0x6f. */
FNIEMOP_STUB(iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq);
/** Opcode 0x0f 0x70. */
FNIEMOP_STUB(iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib);
/** Opcode 0x0f 0x71. */
FNIEMOP_STUB(iemOp_Grp12);
/** Opcode 0x0f 0x72. */
FNIEMOP_STUB(iemOp_Grp13);
/** Opcode 0x0f 0x73. */
FNIEMOP_STUB(iemOp_Grp14);
/** Opcode 0x0f 0x74. */
FNIEMOP_STUB(iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq);
/** Opcode 0x0f 0x75. */
FNIEMOP_STUB(iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq);
/** Opcode 0x0f 0x76. */
FNIEMOP_STUB(iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq);
/** Opcode 0x0f 0x77. */
FNIEMOP_STUB(iemOp_emms);
/** Opcode 0x0f 0x78. */
FNIEMOP_STUB(iemOp_vmread);
/** Opcode 0x0f 0x79. */
FNIEMOP_STUB(iemOp_vmwrite);
/** Opcode 0x0f 0x7c. */
FNIEMOP_STUB(iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps);
/** Opcode 0x0f 0x7d. */
FNIEMOP_STUB(iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps);
/** Opcode 0x0f 0x7e. */
FNIEMOP_STUB(iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq);
/** Opcode 0x0f 0x7f. */
FNIEMOP_STUB(iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq);
1452
1453
/** Opcode 0x0f 0x80 - jo Jv: near jump if OF=1 (16/32-bit displacement). */
FNIEMOP_DEF(iemOp_jo_Jv)
{
    IEMOP_MNEMONIC("jo Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1487
1488
/** Opcode 0x0f 0x81 - jno Jv: near jump if OF=0 (16/32-bit displacement). */
FNIEMOP_DEF(iemOp_jno_Jv)
{
    IEMOP_MNEMONIC("jno Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1522
1523
/** Opcode 0x0f 0x82 - jc/jb/jnae Jv: near jump if CF=1. */
FNIEMOP_DEF(iemOp_jc_Jv)
{
    IEMOP_MNEMONIC("jc/jb/jnae Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1557
1558
/** Opcode 0x0f 0x83 - jnc/jnb/jae Jv: near jump if CF=0. */
FNIEMOP_DEF(iemOp_jnc_Jv)
{
    IEMOP_MNEMONIC("jnc/jnb/jae Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1592
1593
/** Opcode 0x0f 0x84 - je/jz Jv: near jump if ZF=1. */
FNIEMOP_DEF(iemOp_je_Jv)
{
    IEMOP_MNEMONIC("je/jz Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1627
1628
/** Opcode 0x0f 0x85 - jne/jnz Jv: near jump if ZF=0. */
FNIEMOP_DEF(iemOp_jne_Jv)
{
    IEMOP_MNEMONIC("jne/jnz Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1662
1663
/** Opcode 0x0f 0x86 - jbe/jna Jv: near jump if CF=1 or ZF=1. */
FNIEMOP_DEF(iemOp_jbe_Jv)
{
    IEMOP_MNEMONIC("jbe/jna Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1697
1698
/** Opcode 0x0f 0x87 - jnbe/ja Jv: near jump if CF=0 and ZF=0. */
FNIEMOP_DEF(iemOp_jnbe_Jv)
{
    IEMOP_MNEMONIC("jnbe/ja Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1732
1733
/** Opcode 0x0f 0x88 - js Jv: near jump if SF=1. */
FNIEMOP_DEF(iemOp_js_Jv)
{
    IEMOP_MNEMONIC("js Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1767
1768
/** Opcode 0x0f 0x89 - jns Jv: near jump if SF=0. */
FNIEMOP_DEF(iemOp_jns_Jv)
{
    IEMOP_MNEMONIC("jns Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1802
1803
/** Opcode 0x0f 0x8a - jp/jpe Jv: near jump if PF=1. */
FNIEMOP_DEF(iemOp_jp_Jv)
{
    IEMOP_MNEMONIC("jp Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1837
1838
1839/** Opcode 0x0f 0x8b. */
1840FNIEMOP_DEF(iemOp_jnp_Jv)
1841{
1842 IEMOP_MNEMONIC("jo Jv");
1843 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1844 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1845 {
1846 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1847 IEMOP_HLP_NO_LOCK_PREFIX();
1848
1849 IEM_MC_BEGIN(0, 0);
1850 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
1851 IEM_MC_ADVANCE_RIP();
1852 } IEM_MC_ELSE() {
1853 IEM_MC_REL_JMP_S16(i16Imm);
1854 } IEM_MC_ENDIF();
1855 IEM_MC_END();
1856 }
1857 else
1858 {
1859 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1860 IEMOP_HLP_NO_LOCK_PREFIX();
1861
1862 IEM_MC_BEGIN(0, 0);
1863 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
1864 IEM_MC_ADVANCE_RIP();
1865 } IEM_MC_ELSE() {
1866 IEM_MC_REL_JMP_S32(i32Imm);
1867 } IEM_MC_ENDIF();
1868 IEM_MC_END();
1869 }
1870 return VINF_SUCCESS;
1871}
1872
1873
/** Opcode 0x0f 0x8c - jl/jnge Jv: near jump if SF != OF. */
FNIEMOP_DEF(iemOp_jl_Jv)
{
    IEMOP_MNEMONIC("jl/jnge Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1907
1908
/** Opcode 0x0f 0x8d - jnl/jge Jv: near jump if SF == OF. */
FNIEMOP_DEF(iemOp_jnl_Jv)
{
    IEMOP_MNEMONIC("jnl/jge Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1942
1943
/** Opcode 0x0f 0x8e - jle/jng Jv: near jump if ZF=1 or SF != OF. */
FNIEMOP_DEF(iemOp_jle_Jv)
{
    IEMOP_MNEMONIC("jle/jng Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1977
1978
/** Opcode 0x0f 0x8f - jnle/jg Jv: near jump if ZF=0 and SF == OF. */
FNIEMOP_DEF(iemOp_jnle_Jv)
{
    IEMOP_MNEMONIC("jnle/jg Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2012
2013
/** Opcode 0x0f 0x90 - seto Eb: set byte to 1 if OF=1, else 0. */
FNIEMOP_DEF(iemOp_seto_Eb)
{
    IEMOP_MNEMONIC("seto Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2052
2053
/** Opcode 0x0f 0x91 - setno Eb: set byte to 1 if OF=0, else 0. */
FNIEMOP_DEF(iemOp_setno_Eb)
{
    IEMOP_MNEMONIC("setno Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2092
2093
/** Opcode 0x0f 0x92 - setc/setb Eb: set byte to 1 if CF=1, else 0. */
FNIEMOP_DEF(iemOp_setc_Eb)
{
    IEMOP_MNEMONIC("setc Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2132
2133
/** Opcode 0x0f 0x93 - setnc/setae Eb: set byte to 1 if CF=0, else 0. */
FNIEMOP_DEF(iemOp_setnc_Eb)
{
    IEMOP_MNEMONIC("setnc Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2172
2173
/** Opcode 0x0f 0x94. */
FNIEMOP_DEF(iemOp_sete_Eb)
{
    /* SETE/SETZ r/m8: dst = (ZF == 1) ? 1 : 0. */
    IEMOP_MNEMONIC("sete Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2212
2213
/** Opcode 0x0f 0x95. */
FNIEMOP_DEF(iemOp_setne_Eb)
{
    /* SETNE/SETNZ r/m8: dst = (ZF == 0) ? 1 : 0.  Branches are inverted
       relative to SETE: the ZF-set path stores 0. */
    IEMOP_MNEMONIC("setne Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2252
2253
/** Opcode 0x0f 0x96. */
FNIEMOP_DEF(iemOp_setbe_Eb)
{
    /* SETBE/SETNA r/m8: dst = (CF == 1 || ZF == 1) ? 1 : 0. */
    IEMOP_MNEMONIC("setbe Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2292
2293
/** Opcode 0x0f 0x97. */
FNIEMOP_DEF(iemOp_setnbe_Eb)
{
    /* SETNBE/SETA r/m8: dst = (CF == 0 && ZF == 0) ? 1 : 0.  Branches are
       inverted relative to SETBE: the any-bit-set path stores 0. */
    IEMOP_MNEMONIC("setnbe Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2332
2333
/** Opcode 0x0f 0x98. */
FNIEMOP_DEF(iemOp_sets_Eb)
{
    /* SETS r/m8: dst = (SF == 1) ? 1 : 0. */
    IEMOP_MNEMONIC("sets Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2372
2373
/** Opcode 0x0f 0x99. */
FNIEMOP_DEF(iemOp_setns_Eb)
{
    /* SETNS r/m8: dst = (SF == 0) ? 1 : 0.  Branches are inverted relative
       to SETS: the SF-set path stores 0. */
    IEMOP_MNEMONIC("setns Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2412
2413
2414/** Opcode 0x0f 0x9a. */
2415FNIEMOP_DEF(iemOp_setp_Eb)
2416{
2417 IEMOP_MNEMONIC("setnp Eb");
2418 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2419 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2420
2421 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2422 * any way. AMD says it's "unused", whatever that means. We're
2423 * ignoring for now. */
2424 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2425 {
2426 /* register target */
2427 IEM_MC_BEGIN(0, 0);
2428 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2429 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2430 } IEM_MC_ELSE() {
2431 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2432 } IEM_MC_ENDIF();
2433 IEM_MC_ADVANCE_RIP();
2434 IEM_MC_END();
2435 }
2436 else
2437 {
2438 /* memory target */
2439 IEM_MC_BEGIN(0, 1);
2440 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2441 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2442 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2443 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2444 } IEM_MC_ELSE() {
2445 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2446 } IEM_MC_ENDIF();
2447 IEM_MC_ADVANCE_RIP();
2448 IEM_MC_END();
2449 }
2450 return VINF_SUCCESS;
2451}
2452
2453
/** Opcode 0x0f 0x9b. */
FNIEMOP_DEF(iemOp_setnp_Eb)
{
    /* SETNP/SETPO r/m8: dst = (PF == 0) ? 1 : 0.  Branches are inverted:
       the PF-set path stores 0. */
    IEMOP_MNEMONIC("setnp Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2492
2493
/** Opcode 0x0f 0x9c. */
FNIEMOP_DEF(iemOp_setl_Eb)
{
    /* SETL/SETNGE r/m8: dst = (SF != OF) ? 1 : 0. */
    IEMOP_MNEMONIC("setl Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2532
2533
/** Opcode 0x0f 0x9d. */
FNIEMOP_DEF(iemOp_setnl_Eb)
{
    /* SETNL/SETGE r/m8: dst = (SF == OF) ? 1 : 0.  Branches are inverted
       relative to SETL: the SF!=OF path stores 0. */
    IEMOP_MNEMONIC("setnl Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2572
2573
/** Opcode 0x0f 0x9e. */
FNIEMOP_DEF(iemOp_setle_Eb)
{
    /* SETLE/SETNG r/m8: dst = (ZF == 1 || SF != OF) ? 1 : 0. */
    IEMOP_MNEMONIC("setle Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2612
2613
/** Opcode 0x0f 0x9f. */
FNIEMOP_DEF(iemOp_setnle_Eb)
{
    /* SETNLE/SETG r/m8: dst = (ZF == 0 && SF == OF) ? 1 : 0.  Branches are
       inverted relative to SETLE: the condition-true path stores 0. */
    IEMOP_MNEMONIC("setnle Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2652
2653
/**
 * Common 'push segment-register' helper.
 *
 * @param   iReg    The segment register to push (X86_SREG_XXX).
 *
 * @remarks PUSH CS/SS/DS/ES (iReg < FS) are invalid in 64-bit mode, hence the
 *          IEMOP_HLP_NO_64BIT check; FS/GS remain pushable everywhere.  The
 *          value pushed is the 16-bit selector, zero extended when the stack
 *          item is wider than 16 bits.
 */
FNIEMOP_DEF_1(iemOpCommonPushSReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (iReg < X86_SREG_FS)
        IEMOP_HLP_NO_64BIT();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint16_t, u16Value);
            IEM_MC_FETCH_SREG_U16(u16Value, iReg);
            IEM_MC_PUSH_U16(u16Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_SREG_ZX_U32(u32Value, iReg);
            IEM_MC_PUSH_U32(u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_SREG_ZX_U64(u64Value, iReg);
            IEM_MC_PUSH_U64(u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
            /* (no default: IEMMODE has exactly these three values) */
    }

    return VINF_SUCCESS;
}
2696
2697
/** Opcode 0x0f 0xa0. */
FNIEMOP_DEF(iemOp_push_fs)
{
    IEMOP_MNEMONIC("push fs");
    /* NB: the common worker repeats the no-lock-prefix check; harmless. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_FS);
}
2705
2706
/** Opcode 0x0f 0xa1. */
FNIEMOP_DEF(iemOp_pop_fs)
{
    IEMOP_MNEMONIC("pop fs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Segment loads have side effects (descriptor fetch, faults), so defer
       to the C implementation. */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_FS, pIemCpu->enmEffOpSize);
}
2714
2715
/** Opcode 0x0f 0xa2. */
FNIEMOP_DEF(iemOp_cpuid)
{
    IEMOP_MNEMONIC("cpuid");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* CPUID touches eax/ebx/ecx/edx and queries VMM leaf data; handled in C. */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cpuid);
}
2723
2724
2725/**
2726 * Common worker for iemOp_bt_Ev_Gv, iemOp_btc_Ev_Gv, iemOp_btr_Ev_Gv and
2727 * iemOp_bts_Ev_Gv.
2728 */
2729FNIEMOP_DEF_1(iemOpCommonBit_Ev_Gv, PCIEMOPBINSIZES, pImpl)
2730{
2731 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2732 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
2733
2734 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2735 {
2736 /* register destination. */
2737 IEMOP_HLP_NO_LOCK_PREFIX();
2738 switch (pIemCpu->enmEffOpSize)
2739 {
2740 case IEMMODE_16BIT:
2741 IEM_MC_BEGIN(3, 0);
2742 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
2743 IEM_MC_ARG(uint16_t, u16Src, 1);
2744 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2745
2746 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2747 IEM_MC_AND_LOCAL_U16(u16Src, 0xf);
2748 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2749 IEM_MC_REF_EFLAGS(pEFlags);
2750 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
2751
2752 IEM_MC_ADVANCE_RIP();
2753 IEM_MC_END();
2754 return VINF_SUCCESS;
2755
2756 case IEMMODE_32BIT:
2757 IEM_MC_BEGIN(3, 0);
2758 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
2759 IEM_MC_ARG(uint32_t, u32Src, 1);
2760 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2761
2762 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2763 IEM_MC_AND_LOCAL_U32(u32Src, 0x1f);
2764 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2765 IEM_MC_REF_EFLAGS(pEFlags);
2766 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
2767
2768 IEM_MC_ADVANCE_RIP();
2769 IEM_MC_END();
2770 return VINF_SUCCESS;
2771
2772 case IEMMODE_64BIT:
2773 IEM_MC_BEGIN(3, 0);
2774 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
2775 IEM_MC_ARG(uint64_t, u64Src, 1);
2776 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2777
2778 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2779 IEM_MC_AND_LOCAL_U64(u64Src, 0x3f);
2780 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2781 IEM_MC_REF_EFLAGS(pEFlags);
2782 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
2783
2784 IEM_MC_ADVANCE_RIP();
2785 IEM_MC_END();
2786 return VINF_SUCCESS;
2787
2788 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2789 }
2790 }
2791 else
2792 {
2793 /* memory destination. */
2794
2795 uint32_t fAccess;
2796 if (pImpl->pfnLockedU16)
2797 fAccess = IEM_ACCESS_DATA_RW;
2798 else /* BT */
2799 {
2800 IEMOP_HLP_NO_LOCK_PREFIX();
2801 fAccess = IEM_ACCESS_DATA_R;
2802 }
2803
2804 /** @todo test negative bit offsets! */
2805 switch (pIemCpu->enmEffOpSize)
2806 {
2807 case IEMMODE_16BIT:
2808 IEM_MC_BEGIN(3, 2);
2809 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
2810 IEM_MC_ARG(uint16_t, u16Src, 1);
2811 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2812 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2813 IEM_MC_LOCAL(int16_t, i16AddrAdj);
2814
2815 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2816 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2817 IEM_MC_ASSIGN(i16AddrAdj, u16Src);
2818 IEM_MC_AND_ARG_U16(u16Src, 0x0f);
2819 IEM_MC_SAR_LOCAL_S16(i16AddrAdj, 4);
2820 IEM_MC_SAR_LOCAL_S16(i16AddrAdj, 1);
2821 IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(GCPtrEffDst, i16AddrAdj);
2822 IEM_MC_FETCH_EFLAGS(EFlags);
2823
2824 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2825 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2826 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
2827 else
2828 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
2829 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
2830
2831 IEM_MC_COMMIT_EFLAGS(EFlags);
2832 IEM_MC_ADVANCE_RIP();
2833 IEM_MC_END();
2834 return VINF_SUCCESS;
2835
2836 case IEMMODE_32BIT:
2837 IEM_MC_BEGIN(3, 2);
2838 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
2839 IEM_MC_ARG(uint32_t, u32Src, 1);
2840 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2841 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2842 IEM_MC_LOCAL(int32_t, i32AddrAdj);
2843
2844 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2845 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2846 IEM_MC_ASSIGN(i32AddrAdj, u32Src);
2847 IEM_MC_AND_ARG_U32(u32Src, 0x1f);
2848 IEM_MC_SAR_LOCAL_S32(i32AddrAdj, 5);
2849 IEM_MC_SHL_LOCAL_S32(i32AddrAdj, 2);
2850 IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(GCPtrEffDst, i32AddrAdj);
2851 IEM_MC_FETCH_EFLAGS(EFlags);
2852
2853 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2854 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2855 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
2856 else
2857 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
2858 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
2859
2860 IEM_MC_COMMIT_EFLAGS(EFlags);
2861 IEM_MC_ADVANCE_RIP();
2862 IEM_MC_END();
2863 return VINF_SUCCESS;
2864
2865 case IEMMODE_64BIT:
2866 IEM_MC_BEGIN(3, 2);
2867 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
2868 IEM_MC_ARG(uint64_t, u64Src, 1);
2869 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2870 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2871 IEM_MC_LOCAL(int64_t, i64AddrAdj);
2872
2873 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2874 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2875 IEM_MC_ASSIGN(i64AddrAdj, u64Src);
2876 IEM_MC_AND_ARG_U64(u64Src, 0x3f);
2877 IEM_MC_SAR_LOCAL_S64(i64AddrAdj, 6);
2878 IEM_MC_SHL_LOCAL_S64(i64AddrAdj, 3);
2879 IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(GCPtrEffDst, i64AddrAdj);
2880 IEM_MC_FETCH_EFLAGS(EFlags);
2881
2882 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2883 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2884 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
2885 else
2886 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
2887 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
2888
2889 IEM_MC_COMMIT_EFLAGS(EFlags);
2890 IEM_MC_ADVANCE_RIP();
2891 IEM_MC_END();
2892 return VINF_SUCCESS;
2893
2894 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2895 }
2896 }
2897}
2898
2899
2900/** Opcode 0x0f 0xa3. */
2901FNIEMOP_DEF(iemOp_bt_Ev_Gv)
2902{
2903 IEMOP_MNEMONIC("bt Gv,Gv");
2904 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bt);
2905}
2906
2907
/**
 * Common worker for iemOp_shrd_Ev_Gv_Ib and iemOp_shld_Ev_Gv_Ib.
 *
 * @param   pImpl   The double-shift implementation (shld/shrd).
 *
 * @remarks The immediate shift count byte is decoded after the ModR/M
 *          displacement (hence the IEM_OPCODE_GET_NEXT_U8 placement differs
 *          between the register and memory paths - decode order matters).
 *          AF and OF are left undefined by these instructions.
 */
FNIEMOP_DEF_1(iemOpCommonShldShrd_Ib, PCIEMOPSHIFTDBLSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register destination: fetch the imm8 count right after ModR/M. */
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEMOP_HLP_NO_LOCK_PREFIX();

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint16_t *,      pu16Dst,                0);
                IEM_MC_ARG(uint16_t,        u16Src,                 1);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg, /*=*/cShift, 2);
                IEM_MC_ARG(uint32_t *,      pEFlags,                3);

                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint32_t *,      pu32Dst,                0);
                IEM_MC_ARG(uint32_t,        u32Src,                 1);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg, /*=*/cShift, 2);
                IEM_MC_ARG(uint32_t *,      pEFlags,                3);

                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint64_t *,      pu64Dst,                0);
                IEM_MC_ARG(uint64_t,        u64Src,                 1);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg, /*=*/cShift, 2);
                IEM_MC_ARG(uint32_t *,      pEFlags,                3);

                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory destination: the imm8 count follows the displacement, so it
           is fetched after IEM_MC_CALC_RM_EFF_ADDR below. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint16_t *,              pu16Dst,                0);
                IEM_MC_ARG(uint16_t,                u16Src,                 1);
                IEM_MC_ARG(uint8_t,                 cShiftArg,              2);
                IEM_MC_ARG_LOCAL_EFLAGS(            pEFlags, EFlags,        3);
                IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint32_t *,              pu32Dst,                0);
                IEM_MC_ARG(uint32_t,                u32Src,                 1);
                IEM_MC_ARG(uint8_t,                 cShiftArg,              2);
                IEM_MC_ARG_LOCAL_EFLAGS(            pEFlags, EFlags,        3);
                IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint64_t *,              pu64Dst,                0);
                IEM_MC_ARG(uint64_t,                u64Src,                 1);
                IEM_MC_ARG(uint8_t,                 cShiftArg,              2);
                IEM_MC_ARG_LOCAL_EFLAGS(            pEFlags, EFlags,        3);
                IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
3051
3052
3053/**
3054 * Common worker for iemOp_shrd_Ev_Gv_CL and iemOp_shld_Ev_Gv_CL.
3055 */
3056FNIEMOP_DEF_1(iemOpCommonShldShrd_CL, PCIEMOPSHIFTDBLSIZES, pImpl)
3057{
3058 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3059 IEMOP_HLP_NO_LOCK_PREFIX();
3060 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);
3061
3062 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3063 {
3064 IEMOP_HLP_NO_LOCK_PREFIX();
3065
3066 switch (pIemCpu->enmEffOpSize)
3067 {
3068 case IEMMODE_16BIT:
3069 IEM_MC_BEGIN(4, 0);
3070 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3071 IEM_MC_ARG(uint16_t, u16Src, 1);
3072 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3073 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3074
3075 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3076 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3077 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3078 IEM_MC_REF_EFLAGS(pEFlags);
3079 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
3080
3081 IEM_MC_ADVANCE_RIP();
3082 IEM_MC_END();
3083 return VINF_SUCCESS;
3084
3085 case IEMMODE_32BIT:
3086 IEM_MC_BEGIN(4, 0);
3087 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3088 IEM_MC_ARG(uint32_t, u32Src, 1);
3089 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3090 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3091
3092 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3093 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3094 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3095 IEM_MC_REF_EFLAGS(pEFlags);
3096 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
3097
3098 IEM_MC_ADVANCE_RIP();
3099 IEM_MC_END();
3100 return VINF_SUCCESS;
3101
3102 case IEMMODE_64BIT:
3103 IEM_MC_BEGIN(4, 0);
3104 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3105 IEM_MC_ARG(uint64_t, u64Src, 1);
3106 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3107 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3108
3109 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3110 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3111 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3112 IEM_MC_REF_EFLAGS(pEFlags);
3113 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
3114
3115 IEM_MC_ADVANCE_RIP();
3116 IEM_MC_END();
3117 return VINF_SUCCESS;
3118
3119 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3120 }
3121 }
3122 else
3123 {
3124 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
3125
3126 switch (pIemCpu->enmEffOpSize)
3127 {
3128 case IEMMODE_16BIT:
3129 IEM_MC_BEGIN(4, 2);
3130 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3131 IEM_MC_ARG(uint16_t, u16Src, 1);
3132 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3133 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3134 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3135
3136 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3137 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3138 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3139 IEM_MC_FETCH_EFLAGS(EFlags);
3140 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3141 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
3142
3143 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
3144 IEM_MC_COMMIT_EFLAGS(EFlags);
3145 IEM_MC_ADVANCE_RIP();
3146 IEM_MC_END();
3147 return VINF_SUCCESS;
3148
3149 case IEMMODE_32BIT:
3150 IEM_MC_BEGIN(4, 2);
3151 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3152 IEM_MC_ARG(uint32_t, u32Src, 1);
3153 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3154 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3155 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3156
3157 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3158 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3159 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3160 IEM_MC_FETCH_EFLAGS(EFlags);
3161 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3162 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
3163
3164 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
3165 IEM_MC_COMMIT_EFLAGS(EFlags);
3166 IEM_MC_ADVANCE_RIP();
3167 IEM_MC_END();
3168 return VINF_SUCCESS;
3169
3170 case IEMMODE_64BIT:
3171 IEM_MC_BEGIN(4, 2);
3172 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3173 IEM_MC_ARG(uint64_t, u64Src, 1);
3174 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3175 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3176 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3177
3178 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3179 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3180 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3181 IEM_MC_FETCH_EFLAGS(EFlags);
3182 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3183 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
3184
3185 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
3186 IEM_MC_COMMIT_EFLAGS(EFlags);
3187 IEM_MC_ADVANCE_RIP();
3188 IEM_MC_END();
3189 return VINF_SUCCESS;
3190
3191 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3192 }
3193 }
3194}
3195
3196
3197
/** Opcode 0x0f 0xa4. */
FNIEMOP_DEF(iemOp_shld_Ev_Gv_Ib)
{
    IEMOP_MNEMONIC("shld Ev,Gv,Ib");
    /* Double-precision shift left; the shift count comes from an immediate byte. */
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shld);
}
3204
3205
/** Opcode 0x0f 0xa7. */
FNIEMOP_DEF(iemOp_shld_Ev_Gv_CL)
{
    IEMOP_MNEMONIC("shld Ev,Gv,CL");
    /* Double-precision shift left; the shift count comes from the CL register. */
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shld);
}
3212
3213
/** Opcode 0x0f 0xa8. */
FNIEMOP_DEF(iemOp_push_gs)
{
    IEMOP_MNEMONIC("push gs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Defer to the common segment-register push worker. */
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_GS);
}
3221
3222
/** Opcode 0x0f 0xa9. */
FNIEMOP_DEF(iemOp_pop_gs)
{
    IEMOP_MNEMONIC("pop gs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Popping a segment register involves descriptor loading and can fault,
       so it is deferred to a C implementation. */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_GS, pIemCpu->enmEffOpSize);
}
3230
3231
/** Opcode 0x0f 0xaa. */
FNIEMOP_STUB(iemOp_rsm); /* RSM (resume from system management mode) - not implemented yet. */
3234
3235
/** Opcode 0x0f 0xab. */
FNIEMOP_DEF(iemOp_bts_Ev_Gv)
{
    IEMOP_MNEMONIC("bts Ev,Gv");
    /* Bit test and set; shares the common bit-op worker with bt/btr/btc. */
    return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bts);
}
3242
3243
/** Opcode 0x0f 0xac. */
FNIEMOP_DEF(iemOp_shrd_Ev_Gv_Ib)
{
    IEMOP_MNEMONIC("shrd Ev,Gv,Ib");
    /* Double-precision shift right; the shift count comes from an immediate byte. */
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shrd);
}
3250
3251
/** Opcode 0x0f 0xad. */
FNIEMOP_DEF(iemOp_shrd_Ev_Gv_CL)
{
    IEMOP_MNEMONIC("shrd Ev,Gv,CL");
    /* Double-precision shift right; the shift count comes from the CL register. */
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shrd);
}
3258
3259
/** Opcode 0x0f 0xae. */
FNIEMOP_STUB(iemOp_Grp15); /* Group 15 (fxsave, fxrstor, ldmxcsr, clflush, fences, ...) - not implemented yet. */
3262
3263
/** Opcode 0x0f 0xaf. */
FNIEMOP_DEF(iemOp_imul_Gv_Ev)
{
    IEMOP_MNEMONIC("imul Gv,Ev");
    /* SF, ZF, AF and PF are undefined after two-operand IMUL, so exclude them
       from verification-mode EFLAGS comparisons. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_imul_two);
}
3271
3272
/** Opcode 0x0f 0xb0. */
FNIEMOP_STUB(iemOp_cmpxchg_Eb_Gb); /* CMPXCHG byte form - not implemented yet. */
/** Opcode 0x0f 0xb1. */
FNIEMOP_STUB(iemOp_cmpxchg_Ev_Gv); /* CMPXCHG word/dword/qword form - not implemented yet. */
3277
3278
/**
 * Common worker for the far-pointer load instructions (lss, lfs, lgs).
 *
 * Fetches a far pointer (offset followed by a 16-bit selector) from memory and
 * defers to iemCImpl_load_SReg_Greg to load the selector into @a iSegReg and
 * the offset into the ModRM reg-field general register.
 *
 * @param   iSegReg     The segment register to load (X86_SREG_XXX).
 */
FNIEMOP_DEF_1(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /* The source cannot be a register. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_OPCODE();
    uint8_t const iGReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;

    /* The selector always follows the offset, so it sits at displacement 2, 4
       or 8 depending on the effective operand size. */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(5, 1);
            IEM_MC_ARG(uint16_t, uSel, 0);
            IEM_MC_ARG(uint16_t, offSeg, 1);
            IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
            IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
            IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
            IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 2);
            IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(5, 1);
            IEM_MC_ARG(uint16_t, uSel, 0);
            IEM_MC_ARG(uint32_t, offSeg, 1);
            IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
            IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
            IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
            IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 4);
            IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(5, 1);
            IEM_MC_ARG(uint16_t, uSel, 0);
            IEM_MC_ARG(uint64_t, offSeg, 1);
            IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
            IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
            IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
            IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 8);
            IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
3339
3340
/** Opcode 0x0f 0xb2. */
FNIEMOP_DEF(iemOp_lss_Gv_Mp)
{
    IEMOP_MNEMONIC("lss Gv,Mp");
    /* Load far pointer into SS:reg. */
    return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_SS);
}
3347
3348
/** Opcode 0x0f 0xb3. */
FNIEMOP_DEF(iemOp_btr_Ev_Gv)
{
    IEMOP_MNEMONIC("btr Ev,Gv");
    /* Bit test and reset; shares the common bit-op worker with bt/bts/btc. */
    return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btr);
}
3355
3356
/** Opcode 0x0f 0xb4. */
FNIEMOP_DEF(iemOp_lfs_Gv_Mp)
{
    IEMOP_MNEMONIC("lfs Gv,Mp");
    /* Load far pointer into FS:reg. */
    return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_FS);
}
3363
3364
/** Opcode 0x0f 0xb5. */
FNIEMOP_DEF(iemOp_lgs_Gv_Mp)
{
    IEMOP_MNEMONIC("lgs Gv,Mp");
    /* Load far pointer into GS:reg. */
    return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_GS);
}
3371
3372
/** Opcode 0x0f 0xb6. */
FNIEMOP_DEF(iemOp_movzx_Gv_Eb)
{
    IEMOP_MNEMONIC("movzx Gv,Eb");
    /* MOVZX - move a byte source into a word/dword/qword register with
       zero extension. */

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register source: fetch the byte zero-extended and store it into the
           reg-field register at the effective operand size. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U8_ZX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U8_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U8_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U8_ZX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U8_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
3462
3463
/** Opcode 0x0f 0xb7. */
FNIEMOP_DEF(iemOp_movzx_Gv_Ew)
{
    IEMOP_MNEMONIC("movzx Gv,Ew");
    /* MOVZX - move a word source into a dword/qword register with zero
       extension.  Only two destination sizes matter here: 64-bit and
       not-64-bit (the 16-bit result is stored via the 32-bit path; see the
       operand-size @todo below). */

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /** @todo Not entirely sure how the operand size prefix is handled here,
     *        assuming that it will be ignored. Would be nice to have a few
     *        test for this. */
    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_GREG_U16_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_GREG_U16_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
            IEM_MC_FETCH_MEM_U16_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
            IEM_MC_FETCH_MEM_U16_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    return VINF_SUCCESS;
}
3529
3530
/** Opcode 0x0f 0xb8. */
FNIEMOP_STUB(iemOp_popcnt_Gv_Ev_jmpe); /* POPCNT / JMPE - not implemented yet. */
/** Opcode 0x0f 0xb9. */
FNIEMOP_STUB(iemOp_Grp10); /* Group 10 (UD1) - not implemented yet. */
3535
3536
3537/** Opcode 0x0f 0xba. */
3538FNIEMOP_DEF(iemOp_Grp8)
3539{
3540 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3541 PCIEMOPBINSIZES pImpl;
3542 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
3543 {
3544 case 0: case 1: case 2: case 3:
3545 return IEMOP_RAISE_INVALID_OPCODE();
3546 case 4: pImpl = &g_iemAImpl_bt; IEMOP_MNEMONIC("bt Ev,Ib"); break;
3547 case 5: pImpl = &g_iemAImpl_bts; IEMOP_MNEMONIC("bts Ev,Ib"); break;
3548 case 6: pImpl = &g_iemAImpl_btr; IEMOP_MNEMONIC("btr Ev,Ib"); break;
3549 case 7: pImpl = &g_iemAImpl_btc; IEMOP_MNEMONIC("btc Ev,Ib"); break;
3550 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3551 }
3552 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
3553
3554 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3555 {
3556 /* register destination. */
3557 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3558 IEMOP_HLP_NO_LOCK_PREFIX();
3559
3560 switch (pIemCpu->enmEffOpSize)
3561 {
3562 case IEMMODE_16BIT:
3563 IEM_MC_BEGIN(3, 0);
3564 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3565 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u8Bit & 0x0f, 1);
3566 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3567
3568 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3569 IEM_MC_REF_EFLAGS(pEFlags);
3570 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
3571
3572 IEM_MC_ADVANCE_RIP();
3573 IEM_MC_END();
3574 return VINF_SUCCESS;
3575
3576 case IEMMODE_32BIT:
3577 IEM_MC_BEGIN(3, 0);
3578 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3579 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u8Bit & 0x1f, 1);
3580 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3581
3582 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3583 IEM_MC_REF_EFLAGS(pEFlags);
3584 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
3585
3586 IEM_MC_ADVANCE_RIP();
3587 IEM_MC_END();
3588 return VINF_SUCCESS;
3589
3590 case IEMMODE_64BIT:
3591 IEM_MC_BEGIN(3, 0);
3592 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3593 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u8Bit & 0x3f, 1);
3594 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3595
3596 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3597 IEM_MC_REF_EFLAGS(pEFlags);
3598 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
3599
3600 IEM_MC_ADVANCE_RIP();
3601 IEM_MC_END();
3602 return VINF_SUCCESS;
3603
3604 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3605 }
3606 }
3607 else
3608 {
3609 /* memory destination. */
3610
3611 uint32_t fAccess;
3612 if (pImpl->pfnLockedU16)
3613 fAccess = IEM_ACCESS_DATA_RW;
3614 else /* BT */
3615 {
3616 IEMOP_HLP_NO_LOCK_PREFIX();
3617 fAccess = IEM_ACCESS_DATA_R;
3618 }
3619
3620 /** @todo test negative bit offsets! */
3621 switch (pIemCpu->enmEffOpSize)
3622 {
3623 case IEMMODE_16BIT:
3624 IEM_MC_BEGIN(3, 1);
3625 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3626 IEM_MC_ARG(uint16_t, u16Src, 1);
3627 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3628 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3629
3630 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3631 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3632 IEM_MC_ASSIGN(u16Src, u8Bit & 0x0f);
3633 IEM_MC_FETCH_EFLAGS(EFlags);
3634 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3635 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3636 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
3637 else
3638 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
3639 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
3640
3641 IEM_MC_COMMIT_EFLAGS(EFlags);
3642 IEM_MC_ADVANCE_RIP();
3643 IEM_MC_END();
3644 return VINF_SUCCESS;
3645
3646 case IEMMODE_32BIT:
3647 IEM_MC_BEGIN(3, 1);
3648 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3649 IEM_MC_ARG(uint32_t, u32Src, 1);
3650 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3651 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3652
3653 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3654 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3655 IEM_MC_ASSIGN(u32Src, u8Bit & 0x1f);
3656 IEM_MC_FETCH_EFLAGS(EFlags);
3657 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3658 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3659 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
3660 else
3661 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
3662 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
3663
3664 IEM_MC_COMMIT_EFLAGS(EFlags);
3665 IEM_MC_ADVANCE_RIP();
3666 IEM_MC_END();
3667 return VINF_SUCCESS;
3668
3669 case IEMMODE_64BIT:
3670 IEM_MC_BEGIN(3, 1);
3671 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3672 IEM_MC_ARG(uint64_t, u64Src, 1);
3673 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3674 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3675
3676 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3677 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3678 IEM_MC_ASSIGN(u64Src, u8Bit & 0x3f);
3679 IEM_MC_FETCH_EFLAGS(EFlags);
3680 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3681 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3682 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
3683 else
3684 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
3685 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
3686
3687 IEM_MC_COMMIT_EFLAGS(EFlags);
3688 IEM_MC_ADVANCE_RIP();
3689 IEM_MC_END();
3690 return VINF_SUCCESS;
3691
3692 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3693 }
3694 }
3695
3696}
3697
3698
/** Opcode 0x0f 0xbb. */
FNIEMOP_DEF(iemOp_btc_Ev_Gv)
{
    IEMOP_MNEMONIC("btc Ev,Gv");
    /* Bit test and complement; shares the common bit-op worker with bt/bts/btr. */
    return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btc);
}
3705
3706
/** Opcode 0x0f 0xbc. */
FNIEMOP_DEF(iemOp_bsf_Gv_Ev)
{
    IEMOP_MNEMONIC("bsf Gv,Ev");
    /* Only ZF is defined after BSF; exclude the rest from verification-mode
       EFLAGS comparisons. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsf);
}
3714
3715
/** Opcode 0x0f 0xbd. */
FNIEMOP_DEF(iemOp_bsr_Gv_Ev)
{
    IEMOP_MNEMONIC("bsr Gv,Ev");
    /* Only ZF is defined after BSR; exclude the rest from verification-mode
       EFLAGS comparisons. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsr);
}
3723
3724
/** Opcode 0x0f 0xbe. */
FNIEMOP_DEF(iemOp_movsx_Gv_Eb)
{
    IEMOP_MNEMONIC("movsx Gv,Eb");
    /* MOVSX - move a byte source into a word/dword/qword register with
       sign extension. */

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register source: fetch the byte sign-extended and store it into the
           reg-field register at the effective operand size. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U8_SX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U8_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U8_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U8_SX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U8_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U8_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
3814
3815
/** Opcode 0x0f 0xbf. */
FNIEMOP_DEF(iemOp_movsx_Gv_Ew)
{
    IEMOP_MNEMONIC("movsx Gv,Ew");
    /* MOVSX - move a word source into a dword/qword register with sign
       extension.  Only two destination sizes matter here: 64-bit and
       not-64-bit (see the operand-size @todo below). */

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /** @todo Not entirely sure how the operand size prefix is handled here,
     *        assuming that it will be ignored. Would be nice to have a few
     *        test for this. */
    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_GREG_U16_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_GREG_U16_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
            IEM_MC_FETCH_MEM_U16_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
            IEM_MC_FETCH_MEM_U16_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    return VINF_SUCCESS;
}
3881
3882
3883/** Opcode 0x0f 0xc0. */
3884FNIEMOP_DEF(iemOp_xadd_Eb_Gb)
3885{
3886 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3887 IEMOP_MNEMONIC("xadd Eb,Gb");
3888
3889 /*
3890 * If rm is denoting a register, no more instruction bytes.
3891 */
3892 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3893 {
3894 IEMOP_HLP_NO_LOCK_PREFIX();
3895
3896 IEM_MC_BEGIN(3, 0);
3897 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
3898 IEM_MC_ARG(uint8_t *, pu8Reg, 1);
3899 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3900
3901 IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3902 IEM_MC_REF_GREG_U8(pu8Reg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3903 IEM_MC_REF_EFLAGS(pEFlags);
3904 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8, pu8Dst, pu8Reg, pEFlags);
3905
3906 IEM_MC_ADVANCE_RIP();
3907 IEM_MC_END();
3908 }
3909 else
3910 {
3911 /*
3912 * We're accessing memory.
3913 */
3914 IEM_MC_BEGIN(3, 3);
3915 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
3916 IEM_MC_ARG(uint8_t *, pu8Reg, 1);
3917 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
3918 IEM_MC_LOCAL(uint8_t, u8RegCopy);
3919 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3920
3921 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3922 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
3923 IEM_MC_FETCH_GREG_U8(u8RegCopy, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3924 IEM_MC_REF_LOCAL(pu8Reg, u8RegCopy);
3925 IEM_MC_FETCH_EFLAGS(EFlags);
3926 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3927 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8, pu8Dst, pu8Reg, pEFlags);
3928 else
3929 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8_locked, pu8Dst, pu8Reg, pEFlags);
3930
3931 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
3932 IEM_MC_COMMIT_EFLAGS(EFlags);
3933 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8RegCopy);
3934 IEM_MC_ADVANCE_RIP();
3935 IEM_MC_END();
3936 return VINF_SUCCESS;
3937 }
3938 return VINF_SUCCESS;
3939}
3940
3941
3942/** Opcode 0x0f 0xc1. */
3943FNIEMOP_DEF(iemOp_xadd_Ev_Gv)
3944{
3945 IEMOP_MNEMONIC("xadd Ev,Gv");
3946 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3947
3948 /*
3949 * If rm is denoting a register, no more instruction bytes.
3950 */
3951 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3952 {
3953 IEMOP_HLP_NO_LOCK_PREFIX();
3954
3955 switch (pIemCpu->enmEffOpSize)
3956 {
3957 case IEMMODE_16BIT:
3958 IEM_MC_BEGIN(3, 0);
3959 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3960 IEM_MC_ARG(uint16_t *, pu16Reg, 1);
3961 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3962
3963 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3964 IEM_MC_REF_GREG_U16(pu16Reg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3965 IEM_MC_REF_EFLAGS(pEFlags);
3966 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16, pu16Dst, pu16Reg, pEFlags);
3967
3968 IEM_MC_ADVANCE_RIP();
3969 IEM_MC_END();
3970 return VINF_SUCCESS;
3971
3972 case IEMMODE_32BIT:
3973 IEM_MC_BEGIN(3, 0);
3974 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3975 IEM_MC_ARG(uint32_t *, pu32Reg, 1);
3976 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3977
3978 IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3979 IEM_MC_REF_GREG_U32(pu32Reg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3980 IEM_MC_REF_EFLAGS(pEFlags);
3981 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags);
3982
3983 IEM_MC_ADVANCE_RIP();
3984 IEM_MC_END();
3985 return VINF_SUCCESS;
3986
3987 case IEMMODE_64BIT:
3988 IEM_MC_BEGIN(3, 0);
3989 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3990 IEM_MC_ARG(uint64_t *, pu64Reg, 1);
3991 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3992
3993 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3994 IEM_MC_REF_GREG_U64(pu64Reg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3995 IEM_MC_REF_EFLAGS(pEFlags);
3996 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64, pu64Dst, pu64Reg, pEFlags);
3997
3998 IEM_MC_ADVANCE_RIP();
3999 IEM_MC_END();
4000 return VINF_SUCCESS;
4001
4002 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4003 }
4004 }
4005 else
4006 {
4007 /*
4008 * We're accessing memory.
4009 */
4010 switch (pIemCpu->enmEffOpSize)
4011 {
4012 case IEMMODE_16BIT:
4013 IEM_MC_BEGIN(3, 3);
4014 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4015 IEM_MC_ARG(uint16_t *, pu16Reg, 1);
4016 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
4017 IEM_MC_LOCAL(uint16_t, u16RegCopy);
4018 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4019
4020 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
4021 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
4022 IEM_MC_FETCH_GREG_U16(u16RegCopy, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4023 IEM_MC_REF_LOCAL(pu16Reg, u16RegCopy);
4024 IEM_MC_FETCH_EFLAGS(EFlags);
4025 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4026 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16, pu16Dst, pu16Reg, pEFlags);
4027 else
4028 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16_locked, pu16Dst, pu16Reg, pEFlags);
4029
4030 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
4031 IEM_MC_COMMIT_EFLAGS(EFlags);
4032 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16RegCopy);
4033 IEM_MC_ADVANCE_RIP();
4034 IEM_MC_END();
4035 return VINF_SUCCESS;
4036
4037 case IEMMODE_32BIT:
4038 IEM_MC_BEGIN(3, 3);
4039 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4040 IEM_MC_ARG(uint32_t *, pu32Reg, 1);
4041 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
4042 IEM_MC_LOCAL(uint32_t, u32RegCopy);
4043 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4044
4045 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
4046 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
4047 IEM_MC_FETCH_GREG_U32(u32RegCopy, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4048 IEM_MC_REF_LOCAL(pu32Reg, u32RegCopy);
4049 IEM_MC_FETCH_EFLAGS(EFlags);
4050 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4051 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags);
4052 else
4053 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32_locked, pu32Dst, pu32Reg, pEFlags);
4054
4055 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
4056 IEM_MC_COMMIT_EFLAGS(EFlags);
4057 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32RegCopy);
4058 IEM_MC_ADVANCE_RIP();
4059 IEM_MC_END();
4060 return VINF_SUCCESS;
4061
4062 case IEMMODE_64BIT:
4063 IEM_MC_BEGIN(3, 3);
4064 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4065 IEM_MC_ARG(uint64_t *, pu64Reg, 1);
4066 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
4067 IEM_MC_LOCAL(uint64_t, u64RegCopy);
4068 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4069
4070 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
4071 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
4072 IEM_MC_FETCH_GREG_U64(u64RegCopy, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4073 IEM_MC_REF_LOCAL(pu64Reg, u64RegCopy);
4074 IEM_MC_FETCH_EFLAGS(EFlags);
4075 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4076 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64, pu64Dst, pu64Reg, pEFlags);
4077 else
4078 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64_locked, pu64Dst, pu64Reg, pEFlags);
4079
4080 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
4081 IEM_MC_COMMIT_EFLAGS(EFlags);
4082 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64RegCopy);
4083 IEM_MC_ADVANCE_RIP();
4084 IEM_MC_END();
4085 return VINF_SUCCESS;
4086
4087 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4088 }
4089 }
4090}
4091
/*
 * Opcodes 0x0f 0xc2 thru 0x0f 0xc7 are not implemented yet; FNIEMOP_STUB
 * presumably expands to a handler returning a not-implemented status
 * (r39971 changelog mentions VERR_IEM_INSTR_NOT_IMPLEMENTED) -- TODO confirm.
 */
/** Opcode 0x0f 0xc2. */
FNIEMOP_STUB(iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib);
/** Opcode 0x0f 0xc3. */
FNIEMOP_STUB(iemOp_movnti_My_Gy);
/** Opcode 0x0f 0xc4. */
FNIEMOP_STUB(iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib);
/** Opcode 0x0f 0xc5. */
FNIEMOP_STUB(iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib);
/** Opcode 0x0f 0xc6. */
FNIEMOP_STUB(iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib);
/** Opcode 0x0f 0xc7. */
FNIEMOP_STUB(iemOp_Grp9);
4104
4105
/**
 * Common 'bswap register' helper.
 *
 * Byte swaps the general purpose register selected by the caller, sized
 * according to the effective operand size.
 *
 * @param   iReg    Index of the general register to swap, including any
 *                  REX extension bits already folded in by the caller.
 */
FNIEMOP_DEF_1(iemOpCommonBswapGReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            /* 16-bit operand size: the worker gets a 32-bit reference but
               must only touch the low word. */
            IEM_MC_BEGIN(1, 0);
            IEM_MC_ARG(uint32_t *,  pu32Dst, 0);
            IEM_MC_REF_GREG_U32(pu32Dst, iReg);     /* Don't clear the high dword! */
            IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_bswap_u16, pu32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            /* 32-bit: explicitly clear the high dword, matching the 64-bit
               mode behavior of 32-bit register writes. */
            IEM_MC_BEGIN(1, 0);
            IEM_MC_ARG(uint32_t *,  pu32Dst, 0);
            IEM_MC_REF_GREG_U32(pu32Dst, iReg);
            IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
            IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_bswap_u32, pu32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(1, 0);
            IEM_MC_ARG(uint64_t *,  pu64Dst, 0);
            IEM_MC_REF_GREG_U64(pu64Dst, iReg);
            IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_bswap_u64, pu64Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
4145
4146
4147/** Opcode 0x0f 0xc8. */
4148FNIEMOP_DEF(iemOp_bswap_rAX_r8)
4149{
4150 IEMOP_MNEMONIC("bswap rAX/r8");
4151 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xAX | pIemCpu->uRexReg);
4152}
4153
4154
4155/** Opcode 0x0f 0xc9. */
4156FNIEMOP_DEF(iemOp_bswap_rCX_r9)
4157{
4158 IEMOP_MNEMONIC("bswap rCX/r9");
4159 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xCX | pIemCpu->uRexReg);
4160}
4161
4162
4163/** Opcode 0x0f 0xca. */
4164FNIEMOP_DEF(iemOp_bswap_rDX_r10)
4165{
4166 IEMOP_MNEMONIC("bswap rDX/r9");
4167 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDX | pIemCpu->uRexReg);
4168}
4169
4170
4171/** Opcode 0x0f 0xcb. */
4172FNIEMOP_DEF(iemOp_bswap_rBX_r11)
4173{
4174 IEMOP_MNEMONIC("bswap rBX/r9");
4175 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBX | pIemCpu->uRexReg);
4176}
4177
4178
4179/** Opcode 0x0f 0xcc. */
4180FNIEMOP_DEF(iemOp_bswap_rSP_r12)
4181{
4182 IEMOP_MNEMONIC("bswap rSP/r12");
4183 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSP | pIemCpu->uRexReg);
4184}
4185
4186
4187/** Opcode 0x0f 0xcd. */
4188FNIEMOP_DEF(iemOp_bswap_rBP_r13)
4189{
4190 IEMOP_MNEMONIC("bswap rBP/r13");
4191 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBP | pIemCpu->uRexReg);
4192}
4193
4194
4195/** Opcode 0x0f 0xce. */
4196FNIEMOP_DEF(iemOp_bswap_rSI_r14)
4197{
4198 IEMOP_MNEMONIC("bswap rSI/r14");
4199 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSI | pIemCpu->uRexReg);
4200}
4201
4202
4203/** Opcode 0x0f 0xcf. */
4204FNIEMOP_DEF(iemOp_bswap_rDI_r15)
4205{
4206 IEMOP_MNEMONIC("bswap rDI/r15");
4207 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDI | pIemCpu->uRexReg);
4208}
4209
4210
4211
/*
 * Opcodes 0x0f 0xd0 thru 0x0f 0xfe: MMX/SSE packed-integer and streaming
 * instructions, all still stubbed (FNIEMOP_STUB).
 */
/** Opcode 0x0f 0xd0. */
FNIEMOP_STUB(iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps);
/** Opcode 0x0f 0xd1. */
FNIEMOP_STUB(iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq);
/** Opcode 0x0f 0xd2. */
FNIEMOP_STUB(iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq);
/** Opcode 0x0f 0xd3. */
FNIEMOP_STUB(iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq);
/** Opcode 0x0f 0xd4. */
FNIEMOP_STUB(iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq);
/** Opcode 0x0f 0xd5. */
FNIEMOP_STUB(iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq);
/** Opcode 0x0f 0xd6. */
FNIEMOP_STUB(iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq);
/** Opcode 0x0f 0xd7. */
FNIEMOP_STUB(iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq);
/** Opcode 0x0f 0xd8. */
FNIEMOP_STUB(iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq);
/** Opcode 0x0f 0xd9. */
FNIEMOP_STUB(iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq);
/** Opcode 0x0f 0xda. */
FNIEMOP_STUB(iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq);
/** Opcode 0x0f 0xdb. */
FNIEMOP_STUB(iemOp_pand_Pq_Qq__pand_Vdq_Wdq);
/** Opcode 0x0f 0xdc. */
FNIEMOP_STUB(iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq);
/** Opcode 0x0f 0xdd. */
FNIEMOP_STUB(iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq);
/** Opcode 0x0f 0xde. */
FNIEMOP_STUB(iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq);
/** Opcode 0x0f 0xdf. */
FNIEMOP_STUB(iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq);
/** Opcode 0x0f 0xe0. */
FNIEMOP_STUB(iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq);
/** Opcode 0x0f 0xe1. */
FNIEMOP_STUB(iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq);
/** Opcode 0x0f 0xe2. */
FNIEMOP_STUB(iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq);
/** Opcode 0x0f 0xe3. */
FNIEMOP_STUB(iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq);
/** Opcode 0x0f 0xe4. */
FNIEMOP_STUB(iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq);
/** Opcode 0x0f 0xe5. */
FNIEMOP_STUB(iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq);
/** Opcode 0x0f 0xe6. */
FNIEMOP_STUB(iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd);
/** Opcode 0x0f 0xe7. */
FNIEMOP_STUB(iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq);
/** Opcode 0x0f 0xe8. */
FNIEMOP_STUB(iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq);
/** Opcode 0x0f 0xe9. */
FNIEMOP_STUB(iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq);
/** Opcode 0x0f 0xea. */
FNIEMOP_STUB(iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq);
/** Opcode 0x0f 0xeb. */
FNIEMOP_STUB(iemOp_por_Pq_Qq__por_Vdq_Wdq);
/** Opcode 0x0f 0xec. */
FNIEMOP_STUB(iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq);
/** Opcode 0x0f 0xed. */
FNIEMOP_STUB(iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq);
/** Opcode 0x0f 0xee. */
FNIEMOP_STUB(iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq);
/** Opcode 0x0f 0xef. */
FNIEMOP_STUB(iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq);
/** Opcode 0x0f 0xf0. */
FNIEMOP_STUB(iemOp_lddqu_Vdq_Mdq);
/** Opcode 0x0f 0xf1. */
FNIEMOP_STUB(iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq);
/** Opcode 0x0f 0xf2. */
FNIEMOP_STUB(iemOp_psld_Pq_Qq__pslld_Vdq_Wdq);
/** Opcode 0x0f 0xf3. */
FNIEMOP_STUB(iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq);
/** Opcode 0x0f 0xf4. */
FNIEMOP_STUB(iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq);
/** Opcode 0x0f 0xf5. */
FNIEMOP_STUB(iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq);
/** Opcode 0x0f 0xf6. */
FNIEMOP_STUB(iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq);
/** Opcode 0x0f 0xf7. */
FNIEMOP_STUB(iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq);
/** Opcode 0x0f 0xf8. */
FNIEMOP_STUB(iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq);
/** Opcode 0x0f 0xf9. */
FNIEMOP_STUB(iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq);
/** Opcode 0x0f 0xfa. */
FNIEMOP_STUB(iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq);
/** Opcode 0x0f 0xfb. */
FNIEMOP_STUB(iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq);
/** Opcode 0x0f 0xfc. */
FNIEMOP_STUB(iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq);
/** Opcode 0x0f 0xfd. */
FNIEMOP_STUB(iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq);
/** Opcode 0x0f 0xfe. */
FNIEMOP_STUB(iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq);
4306
4307
/**
 * The two byte opcode map (second byte after the 0x0f escape).
 *
 * Indexed by the second opcode byte; iemOp_2byteEscape dispatches through
 * this table.  Many SSE/MMX entries are still FNIEMOP_STUBs (see above).
 */
const PFNIEMOP g_apfnTwoByteMap[256] =
{
    /* 0x00 */  iemOp_Grp6, iemOp_Grp7, iemOp_lar_Gv_Ew, iemOp_lsl_Gv_Ew,
    /* 0x04 */  iemOp_Invalid, iemOp_syscall, iemOp_clts, iemOp_sysret,
    /* 0x08 */  iemOp_invd, iemOp_wbinvd, iemOp_Invalid, iemOp_ud2,
    /* 0x0c */  iemOp_Invalid, iemOp_nop_Ev_prefetch, iemOp_femms, iemOp_3Dnow,
    /* 0x10 */  iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd,
    /* 0x11 */  iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd,
    /* 0x12 */  iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq,
    /* 0x13 */  iemOp_movlps_Mq_Vq__movlpd_Mq_Vq,
    /* 0x14 */  iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq,
    /* 0x15 */  iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq,
    /* 0x16 */  iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq,
    /* 0x17 */  iemOp_movhps_Mq_Vq__movhpd_Mq_Vq,
    /* 0x18 */  iemOp_prefetch_Grp16, iemOp_Invalid, iemOp_Invalid, iemOp_Invalid,
    /* 0x1c */  iemOp_Invalid, iemOp_Invalid, iemOp_Invalid, iemOp_Invalid,
    /* 0x20 */  iemOp_mov_Rd_Cd, iemOp_mov_Rd_Dd, iemOp_mov_Cd_Rd, iemOp_mov_Dd_Rd,
    /* 0x24 */  iemOp_mov_Rd_Td, iemOp_Invalid, iemOp_mov_Td_Rd, iemOp_Invalid,
    /* 0x28 */  iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd,
    /* 0x29 */  iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd,
    /* 0x2a */  iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey,
    /* 0x2b */  iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd,
    /* 0x2c */  iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd,
    /* 0x2d */  iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd,
    /* 0x2e */  iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd,
    /* 0x2f */  iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd,
    /* 0x30 */  iemOp_wrmsr, iemOp_rdtsc, iemOp_rdmsr, iemOp_rdpmc,
    /* 0x34 */  iemOp_sysenter, iemOp_sysexit, iemOp_Invalid, iemOp_getsec,
    /* 0x38 */  iemOp_3byte_Esc_A4, iemOp_Invalid, iemOp_3byte_Esc_A5, iemOp_Invalid,
    /* 0x3c */  iemOp_movnti_Gv_Ev/*?*/, iemOp_Invalid, iemOp_Invalid, iemOp_Invalid,
    /* 0x40 */  iemOp_cmovo_Gv_Ev, iemOp_cmovno_Gv_Ev, iemOp_cmovc_Gv_Ev, iemOp_cmovnc_Gv_Ev,
    /* 0x44 */  iemOp_cmove_Gv_Ev, iemOp_cmovne_Gv_Ev, iemOp_cmovbe_Gv_Ev, iemOp_cmovnbe_Gv_Ev,
    /* 0x48 */  iemOp_cmovs_Gv_Ev, iemOp_cmovns_Gv_Ev, iemOp_cmovp_Gv_Ev, iemOp_cmovnp_Gv_Ev,
    /* 0x4c */  iemOp_cmovl_Gv_Ev, iemOp_cmovnl_Gv_Ev, iemOp_cmovle_Gv_Ev, iemOp_cmovnle_Gv_Ev,
    /* 0x50 */  iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd,
    /* 0x51 */  iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd,
    /* 0x52 */  iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss,
    /* 0x53 */  iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss,
    /* 0x54 */  iemOp_andps_Vps_Wps__andpd_Wpd_Vpd,
    /* 0x55 */  iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd,
    /* 0x56 */  iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd,
    /* 0x57 */  iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd,
    /* 0x58 */  iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd,
    /* 0x59 */  iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd,
    /* 0x5a */  iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd,
    /* 0x5b */  iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps,
    /* 0x5c */  iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd,
    /* 0x5d */  iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd,
    /* 0x5e */  iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd,
    /* 0x5f */  iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd,
    /* 0x60 */  iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq,
    /* 0x61 */  iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq,
    /* 0x62 */  iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq,
    /* 0x63 */  iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq,
    /* 0x64 */  iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq,
    /* 0x65 */  iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq,
    /* 0x66 */  iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq,
    /* 0x67 */  iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq,
    /* 0x68 */  iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq,
    /* 0x69 */  iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq,
    /* 0x6a */  iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq,
    /* 0x6b */  iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq,
    /* 0x6c */  iemOp_punpcklqdq_Vdq_Wdq,
    /* 0x6d */  iemOp_punpckhqdq_Vdq_Wdq,
    /* 0x6e */  iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey,
    /* 0x6f */  iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq,
    /* 0x70 */  iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib,
    /* 0x71 */  iemOp_Grp12,
    /* 0x72 */  iemOp_Grp13,
    /* 0x73 */  iemOp_Grp14,
    /* 0x74 */  iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq,
    /* 0x75 */  iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq,
    /* 0x76 */  iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq,
    /* 0x77 */  iemOp_emms,
    /* 0x78 */  iemOp_vmread, iemOp_vmwrite, iemOp_Invalid, iemOp_Invalid,
    /* 0x7c */  iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps,
    /* 0x7d */  iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps,
    /* 0x7e */  iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq,
    /* 0x7f */  iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq,
    /* 0x80 */  iemOp_jo_Jv, iemOp_jno_Jv, iemOp_jc_Jv, iemOp_jnc_Jv,
    /* 0x84 */  iemOp_je_Jv, iemOp_jne_Jv, iemOp_jbe_Jv, iemOp_jnbe_Jv,
    /* 0x88 */  iemOp_js_Jv, iemOp_jns_Jv, iemOp_jp_Jv, iemOp_jnp_Jv,
    /* 0x8c */  iemOp_jl_Jv, iemOp_jnl_Jv, iemOp_jle_Jv, iemOp_jnle_Jv,
    /* 0x90 */  iemOp_seto_Eb, iemOp_setno_Eb, iemOp_setc_Eb, iemOp_setnc_Eb,
    /* 0x94 */  iemOp_sete_Eb, iemOp_setne_Eb, iemOp_setbe_Eb, iemOp_setnbe_Eb,
    /* 0x98 */  iemOp_sets_Eb, iemOp_setns_Eb, iemOp_setp_Eb, iemOp_setnp_Eb,
    /* 0x9c */  iemOp_setl_Eb, iemOp_setnl_Eb, iemOp_setle_Eb, iemOp_setnle_Eb,
    /* 0xa0 */  iemOp_push_fs, iemOp_pop_fs, iemOp_cpuid, iemOp_bt_Ev_Gv,
    /* 0xa4 */  iemOp_shld_Ev_Gv_Ib, iemOp_shld_Ev_Gv_CL, iemOp_Invalid, iemOp_Invalid,
    /* 0xa8 */  iemOp_push_gs, iemOp_pop_gs, iemOp_rsm, iemOp_bts_Ev_Gv,
    /* 0xac */  iemOp_shrd_Ev_Gv_Ib, iemOp_shrd_Ev_Gv_CL, iemOp_Grp15, iemOp_imul_Gv_Ev,
    /* 0xb0 */  iemOp_cmpxchg_Eb_Gb, iemOp_cmpxchg_Ev_Gv, iemOp_lss_Gv_Mp, iemOp_btr_Ev_Gv,
    /* 0xb4 */  iemOp_lfs_Gv_Mp, iemOp_lgs_Gv_Mp, iemOp_movzx_Gv_Eb, iemOp_movzx_Gv_Ew,
    /* 0xb8 */  iemOp_popcnt_Gv_Ev_jmpe, iemOp_Grp10, iemOp_Grp8, iemOp_btc_Ev_Gv,
    /* 0xbc */  iemOp_bsf_Gv_Ev, iemOp_bsr_Gv_Ev, iemOp_movsx_Gv_Eb, iemOp_movsx_Gv_Ew,
    /* 0xc0 */  iemOp_xadd_Eb_Gb,
    /* 0xc1 */  iemOp_xadd_Ev_Gv,
    /* 0xc2 */  iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib,
    /* 0xc3 */  iemOp_movnti_My_Gy,
    /* 0xc4 */  iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib,
    /* 0xc5 */  iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib,
    /* 0xc6 */  iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib,
    /* 0xc7 */  iemOp_Grp9,
    /* 0xc8 */  iemOp_bswap_rAX_r8, iemOp_bswap_rCX_r9, iemOp_bswap_rDX_r10, iemOp_bswap_rBX_r11,
    /* 0xcc */  iemOp_bswap_rSP_r12, iemOp_bswap_rBP_r13, iemOp_bswap_rSI_r14, iemOp_bswap_rDI_r15,
    /* 0xd0 */  iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps,
    /* 0xd1 */  iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq,
    /* 0xd2 */  iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq,
    /* 0xd3 */  iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq,
    /* 0xd4 */  iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq,
    /* 0xd5 */  iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq,
    /* 0xd6 */  iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq,
    /* 0xd7 */  iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq,
    /* 0xd8 */  iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq,
    /* 0xd9 */  iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq,
    /* 0xda */  iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq,
    /* 0xdb */  iemOp_pand_Pq_Qq__pand_Vdq_Wdq,
    /* 0xdc */  iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq,
    /* 0xdd */  iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq,
    /* 0xde */  iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq,
    /* 0xdf */  iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq,
    /* 0xe0 */  iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq,
    /* 0xe1 */  iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq,
    /* 0xe2 */  iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq,
    /* 0xe3 */  iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq,
    /* 0xe4 */  iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq,
    /* 0xe5 */  iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq,
    /* 0xe6 */  iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd,
    /* 0xe7 */  iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq,
    /* 0xe8 */  iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq,
    /* 0xe9 */  iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq,
    /* 0xea */  iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq,
    /* 0xeb */  iemOp_por_Pq_Qq__por_Vdq_Wdq,
    /* 0xec */  iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq,
    /* 0xed */  iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq,
    /* 0xee */  iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq,
    /* 0xef */  iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq,
    /* 0xf0 */  iemOp_lddqu_Vdq_Mdq,
    /* 0xf1 */  iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq,
    /* 0xf2 */  iemOp_psld_Pq_Qq__pslld_Vdq_Wdq,
    /* 0xf3 */  iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq,
    /* 0xf4 */  iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq,
    /* 0xf5 */  iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq,
    /* 0xf6 */  iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq,
    /* 0xf7 */  iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq,
    /* 0xf8 */  iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq,
    /* 0xf9 */  iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq,
    /* 0xfa */  iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq,
    /* 0xfb */  iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq,
    /* 0xfc */  iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq,
    /* 0xfd */  iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq,
    /* 0xfe */  iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq,
    /* 0xff */  iemOp_Invalid
};
4462
4463/** @} */
4464
4465
4466/** @name One byte opcodes.
4467 *
4468 * @{
4469 */
4470
/*
 * Opcodes 0x00-0x05: ADD.  Each form selects the matching generic binary
 * operator decoder helper and hands it the 'add' implementation table.
 * (Intel notation: Eb/Ev = r/m operand, Gb/Gv = register operand,
 * Ib/Iz = immediate.)
 */

/** Opcode 0x00. */
FNIEMOP_DEF(iemOp_add_Eb_Gb)
{
    IEMOP_MNEMONIC("add Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_add);
}


/** Opcode 0x01. */
FNIEMOP_DEF(iemOp_add_Ev_Gv)
{
    IEMOP_MNEMONIC("add Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_add);
}


/** Opcode 0x02. */
FNIEMOP_DEF(iemOp_add_Gb_Eb)
{
    IEMOP_MNEMONIC("add Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_add);
}


/** Opcode 0x03. */
FNIEMOP_DEF(iemOp_add_Gv_Ev)
{
    IEMOP_MNEMONIC("add Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_add);
}


/** Opcode 0x04. */
FNIEMOP_DEF(iemOp_add_Al_Ib)
{
    IEMOP_MNEMONIC("add al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_add);
}


/** Opcode 0x05. */
FNIEMOP_DEF(iemOp_add_eAX_Iz)
{
    IEMOP_MNEMONIC("add rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_add);
}
4517
4518
/** Opcode 0x06. */
FNIEMOP_DEF(iemOp_push_ES)
{
    IEMOP_MNEMONIC("push es");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_ES);
}


/** Opcode 0x07. */
FNIEMOP_DEF(iemOp_pop_ES)
{
    IEMOP_MNEMONIC("pop es");
    IEMOP_HLP_NO_64BIT();       /* invalid in 64-bit mode */
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_ES, pIemCpu->enmEffOpSize);
}
4535
4536
/** Opcode 0x08. */
FNIEMOP_DEF(iemOp_or_Eb_Gb)
{
    IEMOP_MNEMONIC("or  Eb,Gb");
    /* AF is left undefined by OR; tell the verification mode. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_or);
}
4544
4545
4546/** Opcode 0x09. */
4547FNIEMOP_DEF(iemOp_or_Ev_Gv)
4548{
4549 IEMOP_MNEMONIC("or Ev,Gv ");
4550 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4551 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_or);
4552}
4553
4554
/*
 * Opcodes 0x0a-0x0d: remaining OR forms.  AF is undefined after OR, hence
 * the verification-mode declarations.
 */
/** Opcode 0x0a. */
FNIEMOP_DEF(iemOp_or_Gb_Eb)
{
    IEMOP_MNEMONIC("or Gb,Eb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_or);
}


/** Opcode 0x0b. */
FNIEMOP_DEF(iemOp_or_Gv_Ev)
{
    IEMOP_MNEMONIC("or Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_or);
}


/** Opcode 0x0c. */
FNIEMOP_DEF(iemOp_or_Al_Ib)
{
    IEMOP_MNEMONIC("or al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_or);
}


/** Opcode 0x0d. */
FNIEMOP_DEF(iemOp_or_eAX_Iz)
{
    IEMOP_MNEMONIC("or rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_or);
}
4589
4590
/** Opcode 0x0e. */
FNIEMOP_DEF(iemOp_push_CS)
{
    IEMOP_MNEMONIC("push cs");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_CS);
}


/** Opcode 0x0f. */
FNIEMOP_DEF(iemOp_2byteEscape)
{
    /* Two-byte opcode escape: fetch the next opcode byte and dispatch
       through the two-byte opcode table. */
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnTwoByteMap[b]);
}
4605
/*
 * Opcodes 0x10-0x15: ADC (add with carry).  All forms defer to the generic
 * binary operator helpers with the 'adc' implementation table.
 */
/** Opcode 0x10. */
FNIEMOP_DEF(iemOp_adc_Eb_Gb)
{
    IEMOP_MNEMONIC("adc Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_adc);
}


/** Opcode 0x11. */
FNIEMOP_DEF(iemOp_adc_Ev_Gv)
{
    IEMOP_MNEMONIC("adc Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_adc);
}


/** Opcode 0x12. */
FNIEMOP_DEF(iemOp_adc_Gb_Eb)
{
    IEMOP_MNEMONIC("adc Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_adc);
}


/** Opcode 0x13. */
FNIEMOP_DEF(iemOp_adc_Gv_Ev)
{
    IEMOP_MNEMONIC("adc Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_adc);
}


/** Opcode 0x14. */
FNIEMOP_DEF(iemOp_adc_Al_Ib)
{
    IEMOP_MNEMONIC("adc al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_adc);
}


/** Opcode 0x15. */
FNIEMOP_DEF(iemOp_adc_eAX_Iz)
{
    IEMOP_MNEMONIC("adc rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_adc);
}
4652
4653
/** Opcode 0x16. */
FNIEMOP_DEF(iemOp_push_SS)
{
    IEMOP_MNEMONIC("push ss");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_SS);
}


/** Opcode 0x17. */
FNIEMOP_DEF(iemOp_pop_SS)
{
    IEMOP_MNEMONIC("pop ss"); /** @todo implies instruction fusing? */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT();       /* invalid in 64-bit mode */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_SS, pIemCpu->enmEffOpSize);
}
4670
4671
/*
 * Opcodes 0x18-0x1d: SBB (subtract with borrow).  All forms defer to the
 * generic binary operator helpers with the 'sbb' implementation table.
 */
/** Opcode 0x18. */
FNIEMOP_DEF(iemOp_sbb_Eb_Gb)
{
    IEMOP_MNEMONIC("sbb Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sbb);
}


/** Opcode 0x19. */
FNIEMOP_DEF(iemOp_sbb_Ev_Gv)
{
    IEMOP_MNEMONIC("sbb Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sbb);
}


/** Opcode 0x1a. */
FNIEMOP_DEF(iemOp_sbb_Gb_Eb)
{
    IEMOP_MNEMONIC("sbb Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sbb);
}


/** Opcode 0x1b. */
FNIEMOP_DEF(iemOp_sbb_Gv_Ev)
{
    IEMOP_MNEMONIC("sbb Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sbb);
}


/** Opcode 0x1c. */
FNIEMOP_DEF(iemOp_sbb_Al_Ib)
{
    IEMOP_MNEMONIC("sbb al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sbb);
}


/** Opcode 0x1d. */
FNIEMOP_DEF(iemOp_sbb_eAX_Iz)
{
    IEMOP_MNEMONIC("sbb rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sbb);
}
4718
4719
/** Opcode 0x1e. */
FNIEMOP_DEF(iemOp_push_DS)
{
    IEMOP_MNEMONIC("push ds");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_DS);
}


/** Opcode 0x1f. */
FNIEMOP_DEF(iemOp_pop_DS)
{
    IEMOP_MNEMONIC("pop ds");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT();       /* invalid in 64-bit mode */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_DS, pIemCpu->enmEffOpSize);
}
4736
4737
/*
 * Opcodes 0x20-0x25: AND.  AF is undefined after AND, hence the
 * verification-mode declarations.
 */
/** Opcode 0x20. */
FNIEMOP_DEF(iemOp_and_Eb_Gb)
{
    IEMOP_MNEMONIC("and Eb,Gb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_and);
}


/** Opcode 0x21. */
FNIEMOP_DEF(iemOp_and_Ev_Gv)
{
    IEMOP_MNEMONIC("and Ev,Gv");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_and);
}


/** Opcode 0x22. */
FNIEMOP_DEF(iemOp_and_Gb_Eb)
{
    IEMOP_MNEMONIC("and Gb,Eb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_and);
}


/** Opcode 0x23. */
FNIEMOP_DEF(iemOp_and_Gv_Ev)
{
    IEMOP_MNEMONIC("and Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_and);
}


/** Opcode 0x24. */
FNIEMOP_DEF(iemOp_and_Al_Ib)
{
    IEMOP_MNEMONIC("and al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_and);
}


/** Opcode 0x25. */
FNIEMOP_DEF(iemOp_and_eAX_Iz)
{
    IEMOP_MNEMONIC("and rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_and);
}
4790
4791
/** Opcode 0x26. */
FNIEMOP_DEF(iemOp_seg_ES)
{
    /* ES segment override prefix: record it, set the effective segment and
       continue decoding with the next opcode byte. */
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_ES;
    pIemCpu->iEffSeg = X86_SREG_ES;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}


/** Opcode 0x27. */
FNIEMOP_STUB(iemOp_daa);    /* stub - not implemented yet */
4805
4806
/*
 * Opcodes 0x28-0x2d: SUB.  All forms defer to the generic binary operator
 * helpers with the 'sub' implementation table.
 */
/** Opcode 0x28. */
FNIEMOP_DEF(iemOp_sub_Eb_Gb)
{
    IEMOP_MNEMONIC("sub Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sub);
}


/** Opcode 0x29. */
FNIEMOP_DEF(iemOp_sub_Ev_Gv)
{
    IEMOP_MNEMONIC("sub Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sub);
}


/** Opcode 0x2a. */
FNIEMOP_DEF(iemOp_sub_Gb_Eb)
{
    IEMOP_MNEMONIC("sub Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sub);
}


/** Opcode 0x2b. */
FNIEMOP_DEF(iemOp_sub_Gv_Ev)
{
    IEMOP_MNEMONIC("sub Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sub);
}


/** Opcode 0x2c. */
FNIEMOP_DEF(iemOp_sub_Al_Ib)
{
    IEMOP_MNEMONIC("sub al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sub);
}


/** Opcode 0x2d. */
FNIEMOP_DEF(iemOp_sub_eAX_Iz)
{
    IEMOP_MNEMONIC("sub rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sub);
}
4853
4854
/** Opcode 0x2e. */
FNIEMOP_DEF(iemOp_seg_CS)
{
    /* CS segment override prefix: record it, set the effective segment and
       continue decoding with the next opcode byte. */
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_CS;
    pIemCpu->iEffSeg = X86_SREG_CS;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}


/** Opcode 0x2f. */
FNIEMOP_STUB(iemOp_das);    /* stub - not implemented yet */
4868
4869
/*
 * Opcodes 0x30-0x35: XOR.  AF is undefined after XOR, hence the
 * verification-mode declarations.
 */
/** Opcode 0x30. */
FNIEMOP_DEF(iemOp_xor_Eb_Gb)
{
    IEMOP_MNEMONIC("xor Eb,Gb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_xor);
}


/** Opcode 0x31. */
FNIEMOP_DEF(iemOp_xor_Ev_Gv)
{
    IEMOP_MNEMONIC("xor Ev,Gv");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_xor);
}


/** Opcode 0x32. */
FNIEMOP_DEF(iemOp_xor_Gb_Eb)
{
    IEMOP_MNEMONIC("xor Gb,Eb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_xor);
}


/** Opcode 0x33. */
FNIEMOP_DEF(iemOp_xor_Gv_Ev)
{
    IEMOP_MNEMONIC("xor Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_xor);
}


/** Opcode 0x34. */
FNIEMOP_DEF(iemOp_xor_Al_Ib)
{
    IEMOP_MNEMONIC("xor al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_xor);
}


/** Opcode 0x35. */
FNIEMOP_DEF(iemOp_xor_eAX_Iz)
{
    IEMOP_MNEMONIC("xor rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_xor);
}
4922
4923
/** Opcode 0x36. */
FNIEMOP_DEF(iemOp_seg_SS)
{
    /* SS segment override prefix: record it, set the effective segment and
       continue decoding with the next opcode byte. */
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_SS;
    pIemCpu->iEffSeg = X86_SREG_SS;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}


/** Opcode 0x37. */
FNIEMOP_STUB(iemOp_aaa);    /* stub - not implemented yet */
4937
4938
/*
 * Opcodes 0x38-0x3d: CMP.  Same decoding as the other binary operators;
 * the 'cmp' implementation table only reads the destination.
 */
/** Opcode 0x38. */
FNIEMOP_DEF(iemOp_cmp_Eb_Gb)
{
    IEMOP_MNEMONIC("cmp Eb,Gb");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_cmp);
}


/** Opcode 0x39. */
FNIEMOP_DEF(iemOp_cmp_Ev_Gv)
{
    IEMOP_MNEMONIC("cmp Ev,Gv");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_cmp);
}


/** Opcode 0x3a. */
FNIEMOP_DEF(iemOp_cmp_Gb_Eb)
{
    IEMOP_MNEMONIC("cmp Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_cmp);
}


/** Opcode 0x3b. */
FNIEMOP_DEF(iemOp_cmp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmp Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_cmp);
}


/** Opcode 0x3c. */
FNIEMOP_DEF(iemOp_cmp_Al_Ib)
{
    IEMOP_MNEMONIC("cmp al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_cmp);
}


/** Opcode 0x3d. */
FNIEMOP_DEF(iemOp_cmp_eAX_Iz)
{
    IEMOP_MNEMONIC("cmp rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_cmp);
}


/** Opcode 0x3e. */
FNIEMOP_DEF(iemOp_seg_DS)
{
    /* DS segment override prefix: record it, set the effective segment and
       continue decoding with the next opcode byte. */
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_DS;
    pIemCpu->iEffSeg = X86_SREG_DS;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}


/** Opcode 0x3f. */
FNIEMOP_STUB(iemOp_aas);    /* stub - not implemented yet */
5002
5003/**
5004 * Common 'inc/dec/not/neg register' helper.
5005 */
5006FNIEMOP_DEF_2(iemOpCommonUnaryGReg, PCIEMOPUNARYSIZES, pImpl, uint8_t, iReg)
5007{
5008 IEMOP_HLP_NO_LOCK_PREFIX();
5009 switch (pIemCpu->enmEffOpSize)
5010 {
5011 case IEMMODE_16BIT:
5012 IEM_MC_BEGIN(2, 0);
5013 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5014 IEM_MC_ARG(uint32_t *, pEFlags, 1);
5015 IEM_MC_REF_GREG_U16(pu16Dst, iReg);
5016 IEM_MC_REF_EFLAGS(pEFlags);
5017 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
5018 IEM_MC_ADVANCE_RIP();
5019 IEM_MC_END();
5020 return VINF_SUCCESS;
5021
5022 case IEMMODE_32BIT:
5023 IEM_MC_BEGIN(2, 0);
5024 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5025 IEM_MC_ARG(uint32_t *, pEFlags, 1);
5026 IEM_MC_REF_GREG_U32(pu32Dst, iReg);
5027 IEM_MC_REF_EFLAGS(pEFlags);
5028 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
5029 IEM_MC_ADVANCE_RIP();
5030 IEM_MC_END();
5031 return VINF_SUCCESS;
5032
5033 case IEMMODE_64BIT:
5034 IEM_MC_BEGIN(2, 0);
5035 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5036 IEM_MC_ARG(uint32_t *, pEFlags, 1);
5037 IEM_MC_REF_GREG_U64(pu64Dst, iReg);
5038 IEM_MC_REF_EFLAGS(pEFlags);
5039 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
5040 IEM_MC_ADVANCE_RIP();
5041 IEM_MC_END();
5042 return VINF_SUCCESS;
5043 }
5044 return VINF_SUCCESS;
5045}
5046
5047
/*
 * Opcodes 0x40-0x43: INC in 16/32-bit modes, REX prefixes in 64-bit mode.
 * When decoding a REX prefix the handler records the extension bits and
 * restarts decoding with the next opcode byte.
 */

/** Opcode 0x40. */
FNIEMOP_DEF(iemOp_inc_eAX)
{
    /*
     * This is a REX prefix in 64-bit mode (plain REX, no extension bits).
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX;

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eAX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xAX);
}


/** Opcode 0x41. */
FNIEMOP_DEF(iemOp_inc_eCX)
{
    /*
     * This is a REX.B prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B;
        pIemCpu->uRexB = 1 << 3;    /* adds 8 to r/m / opcode-embedded register indexes */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eCX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xCX);
}


/** Opcode 0x42. */
FNIEMOP_DEF(iemOp_inc_eDX)
{
    /*
     * This is a REX.X prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X;
        pIemCpu->uRexIndex = 1 << 3;    /* adds 8 to the SIB index register */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eDX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDX);
}



/** Opcode 0x43. */
FNIEMOP_DEF(iemOp_inc_eBX)
{
    /*
     * This is a REX.XB prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
        pIemCpu->uRexB = 1 << 3;
        pIemCpu->uRexIndex = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eBX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBX);
}
5127
5128
5129/** Opcode 0x44. */
5130FNIEMOP_DEF(iemOp_inc_eSP)
5131{
5132 /*
5133 * This is a REX prefix in 64-bit mode.
5134 */
5135 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5136 {
5137 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R;
5138 pIemCpu->uRexReg = 1 << 3;
5139
5140 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5141 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5142 }
5143
5144 IEMOP_MNEMONIC("inc eSP");
5145 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSP);
5146}
5147
5148
5149/** Opcode 0x45. */
5150FNIEMOP_DEF(iemOp_inc_eBP)
5151{
5152 /*
5153 * This is a REX prefix in 64-bit mode.
5154 */
5155 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5156 {
5157 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B;
5158 pIemCpu->uRexReg = 1 << 3;
5159 pIemCpu->uRexB = 1 << 3;
5160
5161 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5162 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5163 }
5164
5165 IEMOP_MNEMONIC("inc eBP");
5166 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBP);
5167}
5168
5169
5170/** Opcode 0x46. */
5171FNIEMOP_DEF(iemOp_inc_eSI)
5172{
5173 /*
5174 * This is a REX prefix in 64-bit mode.
5175 */
5176 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5177 {
5178 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X;
5179 pIemCpu->uRexReg = 1 << 3;
5180 pIemCpu->uRexIndex = 1 << 3;
5181
5182 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5183 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5184 }
5185
5186 IEMOP_MNEMONIC("inc eSI");
5187 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSI);
5188}
5189
5190
5191/** Opcode 0x47. */
5192FNIEMOP_DEF(iemOp_inc_eDI)
5193{
5194 /*
5195 * This is a REX prefix in 64-bit mode.
5196 */
5197 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5198 {
5199 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
5200 pIemCpu->uRexReg = 1 << 3;
5201 pIemCpu->uRexB = 1 << 3;
5202 pIemCpu->uRexIndex = 1 << 3;
5203
5204 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5205 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5206 }
5207
5208 IEMOP_MNEMONIC("inc eDI");
5209 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDI);
5210}
5211
5212
5213/** Opcode 0x48. */
5214FNIEMOP_DEF(iemOp_dec_eAX)
5215{
5216 /*
5217 * This is a REX prefix in 64-bit mode.
5218 */
5219 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5220 {
5221 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_SIZE_REX_W;
5222 iemRecalEffOpSize(pIemCpu);
5223
5224 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5225 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5226 }
5227
5228 IEMOP_MNEMONIC("dec eAX");
5229 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xAX);
5230}
5231
5232
5233/** Opcode 0x49. */
5234FNIEMOP_DEF(iemOp_dec_eCX)
5235{
5236 /*
5237 * This is a REX prefix in 64-bit mode.
5238 */
5239 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5240 {
5241 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
5242 pIemCpu->uRexB = 1 << 3;
5243 iemRecalEffOpSize(pIemCpu);
5244
5245 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5246 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5247 }
5248
5249 IEMOP_MNEMONIC("dec eCX");
5250 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xCX);
5251}
5252
5253
5254/** Opcode 0x4a. */
5255FNIEMOP_DEF(iemOp_dec_eDX)
5256{
5257 /*
5258 * This is a REX prefix in 64-bit mode.
5259 */
5260 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5261 {
5262 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
5263 pIemCpu->uRexIndex = 1 << 3;
5264 iemRecalEffOpSize(pIemCpu);
5265
5266 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5267 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5268 }
5269
5270 IEMOP_MNEMONIC("dec eDX");
5271 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDX);
5272}
5273
5274
5275/** Opcode 0x4b. */
5276FNIEMOP_DEF(iemOp_dec_eBX)
5277{
5278 /*
5279 * This is a REX prefix in 64-bit mode.
5280 */
5281 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5282 {
5283 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
5284 pIemCpu->uRexB = 1 << 3;
5285 pIemCpu->uRexIndex = 1 << 3;
5286 iemRecalEffOpSize(pIemCpu);
5287
5288 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5289 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5290 }
5291
5292 IEMOP_MNEMONIC("dec eBX");
5293 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBX);
5294}
5295
5296
5297/** Opcode 0x4c. */
5298FNIEMOP_DEF(iemOp_dec_eSP)
5299{
5300 /*
5301 * This is a REX prefix in 64-bit mode.
5302 */
5303 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5304 {
5305 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_SIZE_REX_W;
5306 pIemCpu->uRexReg = 1 << 3;
5307 iemRecalEffOpSize(pIemCpu);
5308
5309 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5310 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5311 }
5312
5313 IEMOP_MNEMONIC("dec eSP");
5314 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSP);
5315}
5316
5317
5318/** Opcode 0x4d. */
5319FNIEMOP_DEF(iemOp_dec_eBP)
5320{
5321 /*
5322 * This is a REX prefix in 64-bit mode.
5323 */
5324 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5325 {
5326 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
5327 pIemCpu->uRexReg = 1 << 3;
5328 pIemCpu->uRexB = 1 << 3;
5329 iemRecalEffOpSize(pIemCpu);
5330
5331 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5332 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5333 }
5334
5335 IEMOP_MNEMONIC("dec eBP");
5336 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBP);
5337}
5338
5339
5340/** Opcode 0x4e. */
5341FNIEMOP_DEF(iemOp_dec_eSI)
5342{
5343 /*
5344 * This is a REX prefix in 64-bit mode.
5345 */
5346 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5347 {
5348 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
5349 pIemCpu->uRexReg = 1 << 3;
5350 pIemCpu->uRexIndex = 1 << 3;
5351 iemRecalEffOpSize(pIemCpu);
5352
5353 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5354 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5355 }
5356
5357 IEMOP_MNEMONIC("dec eSI");
5358 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSI);
5359}
5360
5361
5362/** Opcode 0x4f. */
5363FNIEMOP_DEF(iemOp_dec_eDI)
5364{
5365 /*
5366 * This is a REX prefix in 64-bit mode.
5367 */
5368 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5369 {
5370 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
5371 pIemCpu->uRexReg = 1 << 3;
5372 pIemCpu->uRexB = 1 << 3;
5373 pIemCpu->uRexIndex = 1 << 3;
5374 iemRecalEffOpSize(pIemCpu);
5375
5376 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5377 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5378 }
5379
5380 IEMOP_MNEMONIC("dec eDI");
5381 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDI);
5382}
5383
5384
/**
 * Common 'push register' helper.
 *
 * Pushes general register @a iReg using the current effective operand size.
 * In 64-bit mode the register index is extended with REX.B, the default
 * operand size becomes 64-bit, and an operand-size prefix selects 16-bit
 * (there is no 32-bit push in 64-bit mode).
 *
 * @param   iReg    The general register index (without REX extension).
 */
FNIEMOP_DEF_1(iemOpCommonPushGReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        iReg |= pIemCpu->uRexB;
        pIemCpu->enmDefOpSize = IEMMODE_64BIT;
        pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
    }

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint16_t, u16Value);
            IEM_MC_FETCH_GREG_U16(u16Value, iReg);
            IEM_MC_PUSH_U16(u16Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_GREG_U32(u32Value, iReg);
            IEM_MC_PUSH_U32(u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_GREG_U64(u64Value, iReg);
            IEM_MC_PUSH_U64(u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
    }

    return VINF_SUCCESS;
}
5430
5431
/** Opcode 0x50. */
FNIEMOP_DEF(iemOp_push_eAX)
{
    IEMOP_MNEMONIC("push rAX");
    /* Common worker handles operand size and, in 64-bit mode, REX.B. */
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xAX);
}
5438
5439
/** Opcode 0x51. */
FNIEMOP_DEF(iemOp_push_eCX)
{
    IEMOP_MNEMONIC("push rCX");
    /* Common worker handles operand size and, in 64-bit mode, REX.B. */
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xCX);
}
5446
5447
/** Opcode 0x52. */
FNIEMOP_DEF(iemOp_push_eDX)
{
    IEMOP_MNEMONIC("push rDX");
    /* Common worker handles operand size and, in 64-bit mode, REX.B. */
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDX);
}
5454
5455
/** Opcode 0x53. */
FNIEMOP_DEF(iemOp_push_eBX)
{
    IEMOP_MNEMONIC("push rBX");
    /* Common worker handles operand size and, in 64-bit mode, REX.B. */
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBX);
}
5462
5463
/** Opcode 0x54. */
FNIEMOP_DEF(iemOp_push_eSP)
{
    IEMOP_MNEMONIC("push rSP");
    /* Common worker handles operand size and, in 64-bit mode, REX.B. */
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSP);
}
5470
5471
/** Opcode 0x55. */
FNIEMOP_DEF(iemOp_push_eBP)
{
    IEMOP_MNEMONIC("push rBP");
    /* Common worker handles operand size and, in 64-bit mode, REX.B. */
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBP);
}
5478
5479
/** Opcode 0x56. */
FNIEMOP_DEF(iemOp_push_eSI)
{
    IEMOP_MNEMONIC("push rSI");
    /* Common worker handles operand size and, in 64-bit mode, REX.B. */
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSI);
}
5486
5487
/** Opcode 0x57. */
FNIEMOP_DEF(iemOp_push_eDI)
{
    IEMOP_MNEMONIC("push rDI");
    /* Common worker handles operand size and, in 64-bit mode, REX.B. */
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDI);
}
5494
5495
/**
 * Common 'pop register' helper.
 *
 * Pops into general register @a iReg using the current effective operand
 * size.  In 64-bit mode the register index is extended with REX.B, the
 * default operand size becomes 64-bit, and an operand-size prefix selects
 * 16-bit.
 *
 * @param   iReg    The general register index (without REX extension).
 */
FNIEMOP_DEF_1(iemOpCommonPopGReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        iReg |= pIemCpu->uRexB;
        pIemCpu->enmDefOpSize = IEMMODE_64BIT;
        pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
    }

/** @todo How does this code handle iReg==X86_GREG_xSP. How does a real CPU
 *        handle it, for that matter (Intel pseudo code hints that the popped
 *        value is incremented by the stack item size.)  Test it, both encodings
 *        and all three register sizes. */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            /* Note: the '*' is part of the name, so this declares the local as
               a pointer to the destination register. */
            IEM_MC_LOCAL(uint16_t, *pu16Dst);
            IEM_MC_REF_GREG_U16(pu16Dst, iReg);
            IEM_MC_POP_U16(pu16Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, *pu32Dst);
            IEM_MC_REF_GREG_U32(pu32Dst, iReg);
            IEM_MC_POP_U32(pu32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, *pu64Dst);
            IEM_MC_REF_GREG_U64(pu64Dst, iReg);
            IEM_MC_POP_U64(pu64Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
    }

    return VINF_SUCCESS;
}
5545
5546
/** Opcode 0x58. */
FNIEMOP_DEF(iemOp_pop_eAX)
{
    IEMOP_MNEMONIC("pop rAX");
    /* Common worker handles operand size and, in 64-bit mode, REX.B. */
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xAX);
}
5553
5554
/** Opcode 0x59. */
FNIEMOP_DEF(iemOp_pop_eCX)
{
    IEMOP_MNEMONIC("pop rCX");
    /* Common worker handles operand size and, in 64-bit mode, REX.B. */
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xCX);
}
5561
5562
/** Opcode 0x5a. */
FNIEMOP_DEF(iemOp_pop_eDX)
{
    IEMOP_MNEMONIC("pop rDX");
    /* Common worker handles operand size and, in 64-bit mode, REX.B. */
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDX);
}
5569
5570
/** Opcode 0x5b. */
FNIEMOP_DEF(iemOp_pop_eBX)
{
    IEMOP_MNEMONIC("pop rBX");
    /* Common worker handles operand size and, in 64-bit mode, REX.B. */
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBX);
}
5577
5578
/** Opcode 0x5c. */
FNIEMOP_DEF(iemOp_pop_eSP)
{
    IEMOP_MNEMONIC("pop rSP");
    /* Common worker handles operand size and, in 64-bit mode, REX.B.  See the
       @todo in iemOpCommonPopGReg regarding the SP destination special case. */
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSP);
}
5585
5586
/** Opcode 0x5d. */
FNIEMOP_DEF(iemOp_pop_eBP)
{
    IEMOP_MNEMONIC("pop rBP");
    /* Common worker handles operand size and, in 64-bit mode, REX.B. */
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBP);
}
5593
5594
/** Opcode 0x5e. */
FNIEMOP_DEF(iemOp_pop_eSI)
{
    IEMOP_MNEMONIC("pop rSI");
    /* Common worker handles operand size and, in 64-bit mode, REX.B. */
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSI);
}
5601
5602
/** Opcode 0x5f. */
FNIEMOP_DEF(iemOp_pop_eDI)
{
    IEMOP_MNEMONIC("pop rDI");
    /* Common worker handles operand size and, in 64-bit mode, REX.B. */
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDI);
}
5609
5610
5611/** Opcode 0x60. */
5612FNIEMOP_DEF(iemOp_pusha)
5613{
5614 IEMOP_MNEMONIC("pusha");
5615 IEMOP_HLP_NO_64BIT();
5616 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
5617 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_16);
5618 Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
5619 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_32);
5620}
5621
5622
5623/** Opcode 0x61. */
5624FNIEMOP_DEF(iemOp_popa)
5625{
5626 IEMOP_MNEMONIC("popa");
5627 IEMOP_HLP_NO_64BIT();
5628 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
5629 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_16);
5630 Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
5631 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_32);
5632}
5633
5634
/** Opcode 0x62.
 * BOUND Gv,Ma - decoder stub, not implemented yet. */
FNIEMOP_STUB(iemOp_bound_Gv_Ma);
/** Opcode 0x63.
 * ARPL Ew,Gw - decoder stub, not implemented yet. */
FNIEMOP_STUB(iemOp_arpl_Ew_Gw);
5639
5640
5641/** Opcode 0x64. */
5642FNIEMOP_DEF(iemOp_seg_FS)
5643{
5644 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_FS;
5645 pIemCpu->iEffSeg = X86_SREG_FS;
5646
5647 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5648 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5649}
5650
5651
5652/** Opcode 0x65. */
5653FNIEMOP_DEF(iemOp_seg_GS)
5654{
5655 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_GS;
5656 pIemCpu->iEffSeg = X86_SREG_GS;
5657
5658 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5659 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5660}
5661
5662
5663/** Opcode 0x66. */
5664FNIEMOP_DEF(iemOp_op_size)
5665{
5666 pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_OP;
5667 iemRecalEffOpSize(pIemCpu);
5668
5669 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5670 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5671}
5672
5673
5674/** Opcode 0x67. */
5675FNIEMOP_DEF(iemOp_addr_size)
5676{
5677 pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_ADDR;
5678 switch (pIemCpu->enmDefAddrMode)
5679 {
5680 case IEMMODE_16BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
5681 case IEMMODE_32BIT: pIemCpu->enmEffAddrMode = IEMMODE_16BIT; break;
5682 case IEMMODE_64BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
5683 default: AssertFailed();
5684 }
5685
5686 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5687 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5688}
5689
5690
/** Opcode 0x68.
 * PUSH Iz - push an operand-size immediate. */
FNIEMOP_DEF(iemOp_push_Iz)
{
    IEMOP_MNEMONIC("push Iz");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U16(u16Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U32(u32Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            /* The 64-bit form takes a 32-bit immediate sign-extended to 64 bits. */
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U64(u64Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
5734
5735
/** Opcode 0x69.
 * IMUL Gv,Ev,Iz - three-operand multiply via the iemAImpl_imul_two_*
 * workers; the result goes into the ModRM 'reg' register.  SF/ZF/AF/PF are
 * treated as undefined for verification purposes. */
FNIEMOP_DEF(iemOp_imul_Gv_Ev_Iz)
{
    IEMOP_MNEMONIC("imul Gv,Ev,Iz"); /* Gv = Ev * Iz; */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16Tmp);

                /* Work on a local copy of Ev, then store it into the 'reg' register. */
                IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32Tmp);

                IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            /* The 64-bit form takes a 32-bit immediate sign-extended to 64 bits. */
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);

                IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
        }
    }
    AssertFailedReturn(VERR_INTERNAL_ERROR_3);
}
5882
5883
/** Opcode 0x6a.
 * PUSH Ib - push a byte immediate; the int8_t value is sign-extended to the
 * push width by the C argument conversion. */
FNIEMOP_DEF(iemOp_push_Ib)
{
    IEMOP_MNEMONIC("push Ib");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0,0);
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_PUSH_U16(i8Imm);
            break;
        case IEMMODE_32BIT:
            IEM_MC_PUSH_U32(i8Imm);
            break;
        case IEMMODE_64BIT:
            IEM_MC_PUSH_U64(i8Imm);
            break;
    }
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
5909
5910
/** Opcode 0x6b.
 * IMUL Gv,Ev,Ib - three-operand multiply with a byte immediate that is
 * sign-extended (the (int8_t) casts below) to the operand size; result goes
 * into the ModRM 'reg' register.  SF/ZF/AF/PF are treated as undefined for
 * verification purposes. */
FNIEMOP_DEF(iemOp_imul_Gv_Ev_Ib)
{
    IEMOP_MNEMONIC("imul Gv,Ev,Ib"); /* Gv = Ev * Iz; */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16Tmp);

                /* Work on a local copy of Ev, then store it into the 'reg' register. */
                IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32Tmp);

                IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);

                IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
    }
    AssertFailedReturn(VERR_INTERNAL_ERROR_3);
}
6047
6048
/** Opcode 0x6c.
 * INS Yb,DX - dispatches to the C implementation matching the effective
 * address mode; REPNZ is treated the same as REP here. */
FNIEMOP_DEF(iemOp_insb_Yb_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep ins Yb,DX");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("ins Yb,DX");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
6076
6077
/** Opcode 0x6d.
 * INS Yv,DX - dispatches on effective operand and address size; REPNZ is
 * treated the same as REP here. */
FNIEMOP_DEF(iemOp_inswd_Yv_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
    {
        IEMOP_MNEMONIC("rep ins Yv,DX");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reached - every inner case returns */
            case IEMMODE_64BIT: /* 64-bit operand size is serviced by the 32-bit workers */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reached - every inner case returns */
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("ins Yv,DX");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reached - every inner case returns */
            case IEMMODE_64BIT: /* 64-bit operand size is serviced by the 32-bit workers */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reached - every inner case returns */
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
6137
6138
/** Opcode 0x6e - OUTSB: write a byte from the string source to port DX. */
FNIEMOP_DEF(iemOp_outsb_Yb_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Both F2 (REPNZ) and F3 (REPZ) are treated as plain REP for string I/O. */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep out DX,Yb");
        /* Defer to the C implementation matching the effective address size;
           the effective segment is passed along so segment prefixes are honored. */
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("out DX,Yb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
6166
6167
/** Opcode 0x6f - OUTSW/OUTSD: write a word/dword from the string source to port DX. */
FNIEMOP_DEF(iemOp_outswd_Yv_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Both F3 (REPZ) and F2 (REPNZ) are treated as plain REP for string I/O. */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
    {
        IEMOP_MNEMONIC("rep outs DX,Yv");
        /* Dispatch on effective operand size, then on effective address size. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_64BIT: /* 64-bit operand size is handled the same as 32-bit. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("outs DX,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_64BIT: /* 64-bit operand size is handled the same as 32-bit. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
6227
6228
/** Opcode 0x70 - JO Jb: jump short if overflow (OF=1). */
FNIEMOP_DEF(iemOp_jo_Jb)
{
    IEMOP_MNEMONIC("jo Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Near branches default to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6246
6247
/** Opcode 0x71 - JNO Jb: jump short if not overflow (OF=0). */
FNIEMOP_DEF(iemOp_jno_Jb)
{
    IEMOP_MNEMONIC("jno Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Near branches default to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
        IEM_MC_ADVANCE_RIP();       /* OF set: fall through. */
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);   /* OF clear: take the jump. */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6265
/** Opcode 0x72 - JC/JB/JNAE Jb: jump short if carry (CF=1). */
FNIEMOP_DEF(iemOp_jc_Jb)
{
    IEMOP_MNEMONIC("jc/jnae Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Near branches default to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6283
6284
/** Opcode 0x73 - JNC/JNB/JAE Jb: jump short if not carry (CF=0). */
FNIEMOP_DEF(iemOp_jnc_Jb)
{
    IEMOP_MNEMONIC("jnc/jnb Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Near branches default to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
        IEM_MC_ADVANCE_RIP();       /* CF set: fall through. */
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);   /* CF clear: take the jump. */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6302
6303
/** Opcode 0x74 - JE/JZ Jb: jump short if equal/zero (ZF=1). */
FNIEMOP_DEF(iemOp_je_Jb)
{
    IEMOP_MNEMONIC("je/jz Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Near branches default to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6321
6322
/** Opcode 0x75 - JNE/JNZ Jb: jump short if not equal/not zero (ZF=0). */
FNIEMOP_DEF(iemOp_jne_Jb)
{
    IEMOP_MNEMONIC("jne/jnz Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Near branches default to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
        IEM_MC_ADVANCE_RIP();       /* ZF set: fall through. */
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);   /* ZF clear: take the jump. */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6340
6341
/** Opcode 0x76 - JBE/JNA Jb: jump short if below or equal (CF=1 or ZF=1). */
FNIEMOP_DEF(iemOp_jbe_Jb)
{
    IEMOP_MNEMONIC("jbe/jna Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Near branches default to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6359
6360
/** Opcode 0x77 - JNBE/JA Jb: jump short if above (CF=0 and ZF=0). */
FNIEMOP_DEF(iemOp_jnbe_Jb)
{
    IEMOP_MNEMONIC("jnbe/ja Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Near branches default to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
        IEM_MC_ADVANCE_RIP();       /* CF or ZF set: fall through. */
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);   /* both clear: take the jump. */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6378
6379
/** Opcode 0x78 - JS Jb: jump short if sign (SF=1). */
FNIEMOP_DEF(iemOp_js_Jb)
{
    IEMOP_MNEMONIC("js Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Near branches default to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6397
6398
/** Opcode 0x79 - JNS Jb: jump short if not sign (SF=0). */
FNIEMOP_DEF(iemOp_jns_Jb)
{
    IEMOP_MNEMONIC("jns Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Near branches default to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
        IEM_MC_ADVANCE_RIP();       /* SF set: fall through. */
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);   /* SF clear: take the jump. */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6416
6417
/** Opcode 0x7a - JP/JPE Jb: jump short if parity (PF=1). */
FNIEMOP_DEF(iemOp_jp_Jb)
{
    IEMOP_MNEMONIC("jp Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Near branches default to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6435
6436
/** Opcode 0x7b - JNP/JPO Jb: jump short if not parity (PF=0). */
FNIEMOP_DEF(iemOp_jnp_Jb)
{
    IEMOP_MNEMONIC("jnp Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Near branches default to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
        IEM_MC_ADVANCE_RIP();       /* PF set: fall through. */
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);   /* PF clear: take the jump. */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6454
6455
/** Opcode 0x7c - JL/JNGE Jb: jump short if less (SF != OF). */
FNIEMOP_DEF(iemOp_jl_Jb)
{
    IEMOP_MNEMONIC("jl/jnge Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Near branches default to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6473
6474
/** Opcode 0x7d - JNL/JGE Jb: jump short if greater or equal (SF == OF). */
FNIEMOP_DEF(iemOp_jnl_Jb)
{
    IEMOP_MNEMONIC("jnl/jge Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Near branches default to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_ADVANCE_RIP();       /* SF != OF: fall through. */
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);   /* SF == OF: take the jump. */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6492
6493
/** Opcode 0x7e - JLE/JNG Jb: jump short if less or equal (ZF=1 or SF != OF). */
FNIEMOP_DEF(iemOp_jle_Jb)
{
    IEMOP_MNEMONIC("jle/jng Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Near branches default to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6511
6512
/** Opcode 0x7f - JNLE/JG Jb: jump short if greater (ZF=0 and SF == OF). */
FNIEMOP_DEF(iemOp_jnle_Jb)
{
    IEMOP_MNEMONIC("jnle/jg Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Near branches default to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_ADVANCE_RIP();       /* ZF set or SF != OF: fall through. */
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);   /* ZF clear and SF == OF: take the jump. */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6530
6531
/** Opcode 0x80 - Group 1 Eb,Ib: ADD/OR/ADC/SBB/AND/SUB/XOR/CMP r/m8, imm8. */
FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_80)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* The ModR/M reg field selects the operation; each mnemonic in the string
       below occupies 4 bytes (terminators included), hence the *4 indexing. */
    IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Eb,Ib");
    PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        /* A locked variant only exists for ops that write the destination;
           without one (CMP) the access is read-only and LOCK is rejected. */
        uint32_t fAccess;
        if (pImpl->pfnLockedU8)
            fAccess = IEM_ACCESS_DATA_RW;
        else
        { /* CMP */
            IEMOP_HLP_NO_LOCK_PREFIX();
            fAccess = IEM_ACCESS_DATA_R;
        }
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        /* Calculate the effective address before fetching the immediate:
           any displacement bytes precede the immediate in the stream. */
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);

        IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
6590
6591
6592/** Opcode 0x81. */
6593FNIEMOP_DEF(iemOp_Grp1_Ev_Iz)
6594{
6595 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6596 IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Iz");
6597 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
6598
6599 switch (pIemCpu->enmEffOpSize)
6600 {
6601 case IEMMODE_16BIT:
6602 {
6603 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6604 {
6605 /* register target */
6606 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
6607 IEMOP_HLP_NO_LOCK_PREFIX();
6608 IEM_MC_BEGIN(3, 0);
6609 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6610 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u16Imm, 1);
6611 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6612
6613 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6614 IEM_MC_REF_EFLAGS(pEFlags);
6615 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6616
6617 IEM_MC_ADVANCE_RIP();
6618 IEM_MC_END();
6619 }
6620 else
6621 {
6622 /* memory target */
6623 uint32_t fAccess;
6624 if (pImpl->pfnLockedU16)
6625 fAccess = IEM_ACCESS_DATA_RW;
6626 else
6627 { /* CMP, TEST */
6628 IEMOP_HLP_NO_LOCK_PREFIX();
6629 fAccess = IEM_ACCESS_DATA_R;
6630 }
6631 IEM_MC_BEGIN(3, 2);
6632 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6633 IEM_MC_ARG(uint16_t, u16Src, 1);
6634 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6635 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6636
6637 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6638 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
6639 IEM_MC_ASSIGN(u16Src, u16Imm);
6640 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6641 IEM_MC_FETCH_EFLAGS(EFlags);
6642 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6643 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6644 else
6645 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
6646
6647 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
6648 IEM_MC_COMMIT_EFLAGS(EFlags);
6649 IEM_MC_ADVANCE_RIP();
6650 IEM_MC_END();
6651 }
6652 break;
6653 }
6654
6655 case IEMMODE_32BIT:
6656 {
6657 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6658 {
6659 /* register target */
6660 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
6661 IEMOP_HLP_NO_LOCK_PREFIX();
6662 IEM_MC_BEGIN(3, 0);
6663 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6664 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u32Imm, 1);
6665 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6666
6667 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6668 IEM_MC_REF_EFLAGS(pEFlags);
6669 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6670
6671 IEM_MC_ADVANCE_RIP();
6672 IEM_MC_END();
6673 }
6674 else
6675 {
6676 /* memory target */
6677 uint32_t fAccess;
6678 if (pImpl->pfnLockedU32)
6679 fAccess = IEM_ACCESS_DATA_RW;
6680 else
6681 { /* CMP, TEST */
6682 IEMOP_HLP_NO_LOCK_PREFIX();
6683 fAccess = IEM_ACCESS_DATA_R;
6684 }
6685 IEM_MC_BEGIN(3, 2);
6686 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6687 IEM_MC_ARG(uint32_t, u32Src, 1);
6688 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6689 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6690
6691 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6692 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
6693 IEM_MC_ASSIGN(u32Src, u32Imm);
6694 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6695 IEM_MC_FETCH_EFLAGS(EFlags);
6696 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6697 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6698 else
6699 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
6700
6701 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
6702 IEM_MC_COMMIT_EFLAGS(EFlags);
6703 IEM_MC_ADVANCE_RIP();
6704 IEM_MC_END();
6705 }
6706 break;
6707 }
6708
6709 case IEMMODE_64BIT:
6710 {
6711 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6712 {
6713 /* register target */
6714 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
6715 IEMOP_HLP_NO_LOCK_PREFIX();
6716 IEM_MC_BEGIN(3, 0);
6717 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6718 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u64Imm, 1);
6719 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6720
6721 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6722 IEM_MC_REF_EFLAGS(pEFlags);
6723 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6724
6725 IEM_MC_ADVANCE_RIP();
6726 IEM_MC_END();
6727 }
6728 else
6729 {
6730 /* memory target */
6731 uint32_t fAccess;
6732 if (pImpl->pfnLockedU64)
6733 fAccess = IEM_ACCESS_DATA_RW;
6734 else
6735 { /* CMP */
6736 IEMOP_HLP_NO_LOCK_PREFIX();
6737 fAccess = IEM_ACCESS_DATA_R;
6738 }
6739 IEM_MC_BEGIN(3, 2);
6740 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6741 IEM_MC_ARG(uint64_t, u64Src, 1);
6742 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6743 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6744
6745 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6746 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
6747 IEM_MC_ASSIGN(u64Src, u64Imm);
6748 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6749 IEM_MC_FETCH_EFLAGS(EFlags);
6750 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6751 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6752 else
6753 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
6754
6755 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
6756 IEM_MC_COMMIT_EFLAGS(EFlags);
6757 IEM_MC_ADVANCE_RIP();
6758 IEM_MC_END();
6759 }
6760 break;
6761 }
6762 }
6763 return VINF_SUCCESS;
6764}
6765
6766
6767/** Opcode 0x82. */
6768 FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_82)
6769{
6770 IEMOP_HLP_NO_64BIT(); /** @todo do we need to decode the whole instruction or is this ok? */
6771 return FNIEMOP_CALL(iemOp_Grp1_Eb_Ib_80);
6772}
6773
6774
6775/** Opcode 0x83. */
6776FNIEMOP_DEF(iemOp_Grp1_Ev_Ib)
6777{
6778 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6779 IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Ib");
6780 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
6781
6782 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6783 {
6784 /*
6785 * Register target
6786 */
6787 IEMOP_HLP_NO_LOCK_PREFIX();
6788 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6789 switch (pIemCpu->enmEffOpSize)
6790 {
6791 case IEMMODE_16BIT:
6792 {
6793 IEM_MC_BEGIN(3, 0);
6794 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6795 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ (int8_t)u8Imm,1);
6796 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6797
6798 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6799 IEM_MC_REF_EFLAGS(pEFlags);
6800 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6801
6802 IEM_MC_ADVANCE_RIP();
6803 IEM_MC_END();
6804 break;
6805 }
6806
6807 case IEMMODE_32BIT:
6808 {
6809 IEM_MC_BEGIN(3, 0);
6810 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6811 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ (int8_t)u8Imm,1);
6812 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6813
6814 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6815 IEM_MC_REF_EFLAGS(pEFlags);
6816 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6817
6818 IEM_MC_ADVANCE_RIP();
6819 IEM_MC_END();
6820 break;
6821 }
6822
6823 case IEMMODE_64BIT:
6824 {
6825 IEM_MC_BEGIN(3, 0);
6826 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6827 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ (int8_t)u8Imm,1);
6828 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6829
6830 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6831 IEM_MC_REF_EFLAGS(pEFlags);
6832 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6833
6834 IEM_MC_ADVANCE_RIP();
6835 IEM_MC_END();
6836 break;
6837 }
6838 }
6839 }
6840 else
6841 {
6842 /*
6843 * Memory target.
6844 */
6845 uint32_t fAccess;
6846 if (pImpl->pfnLockedU16)
6847 fAccess = IEM_ACCESS_DATA_RW;
6848 else
6849 { /* CMP */
6850 IEMOP_HLP_NO_LOCK_PREFIX();
6851 fAccess = IEM_ACCESS_DATA_R;
6852 }
6853
6854 switch (pIemCpu->enmEffOpSize)
6855 {
6856 case IEMMODE_16BIT:
6857 {
6858 IEM_MC_BEGIN(3, 2);
6859 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6860 IEM_MC_ARG(uint16_t, u16Src, 1);
6861 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6862 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6863
6864 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6865 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6866 IEM_MC_ASSIGN(u16Src, (int8_t)u8Imm);
6867 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6868 IEM_MC_FETCH_EFLAGS(EFlags);
6869 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6870 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6871 else
6872 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
6873
6874 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
6875 IEM_MC_COMMIT_EFLAGS(EFlags);
6876 IEM_MC_ADVANCE_RIP();
6877 IEM_MC_END();
6878 break;
6879 }
6880
6881 case IEMMODE_32BIT:
6882 {
6883 IEM_MC_BEGIN(3, 2);
6884 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6885 IEM_MC_ARG(uint32_t, u32Src, 1);
6886 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6887 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6888
6889 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6890 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6891 IEM_MC_ASSIGN(u32Src, (int8_t)u8Imm);
6892 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6893 IEM_MC_FETCH_EFLAGS(EFlags);
6894 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6895 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6896 else
6897 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
6898
6899 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
6900 IEM_MC_COMMIT_EFLAGS(EFlags);
6901 IEM_MC_ADVANCE_RIP();
6902 IEM_MC_END();
6903 break;
6904 }
6905
6906 case IEMMODE_64BIT:
6907 {
6908 IEM_MC_BEGIN(3, 2);
6909 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6910 IEM_MC_ARG(uint64_t, u64Src, 1);
6911 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6912 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6913
6914 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6915 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6916 IEM_MC_ASSIGN(u64Src, (int8_t)u8Imm);
6917 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6918 IEM_MC_FETCH_EFLAGS(EFlags);
6919 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6920 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6921 else
6922 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
6923
6924 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
6925 IEM_MC_COMMIT_EFLAGS(EFlags);
6926 IEM_MC_ADVANCE_RIP();
6927 IEM_MC_END();
6928 break;
6929 }
6930 }
6931 }
6932 return VINF_SUCCESS;
6933}
6934
6935
/** Opcode 0x84 - TEST Eb,Gb: AND without writeback, sets flags only. */
FNIEMOP_DEF(iemOp_test_Eb_Gb)
{
    IEMOP_MNEMONIC("test Eb,Gb");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    /* TEST leaves AF undefined; tell the verification code not to compare it. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    /* Reuse the generic r/m8,r8 binary-op worker with the TEST implementation. */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_test);
}
6944
6945
/** Opcode 0x85 - TEST Ev,Gv: AND without writeback, sets flags only. */
FNIEMOP_DEF(iemOp_test_Ev_Gv)
{
    IEMOP_MNEMONIC("test Ev,Gv");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    /* TEST leaves AF undefined; tell the verification code not to compare it. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    /* Reuse the generic r/m,reg binary-op worker with the TEST implementation. */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_test);
}
6954
6955
/** Opcode 0x86 - XCHG Eb,Gb: exchange byte register with r/m8. */
FNIEMOP_DEF(iemOp_xchg_Eb_Gb)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_MNEMONIC("xchg Eb,Gb");

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        /* Plain register swap: fetch both, store crosswise.  No flags change. */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint8_t, uTmp1);
        IEM_MC_LOCAL(uint8_t, uTmp2);

        IEM_MC_FETCH_GREG_U8(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_GREG_U8(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         */
/** @todo the register must be committed separately! */
        /* Map the memory operand read/write and let the assembly helper swap
           it with the register referenced in place. */
        IEM_MC_BEGIN(2, 2);
        IEM_MC_ARG(uint8_t *, pu8Mem, 0);
        IEM_MC_ARG(uint8_t *, pu8Reg, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_MEM_MAP(pu8Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u8, pu8Mem, pu8Reg);
        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Mem, IEM_ACCESS_DATA_RW);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
7003
7004
/** Opcode 0x87 - XCHG Ev,Gv: exchange word/dword/qword register with r/m. */
FNIEMOP_DEF(iemOp_xchg_Ev_Gv)
{
    IEMOP_MNEMONIC("xchg Ev,Gv");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        /* Plain register swap per operand size: fetch both, store crosswise. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, uTmp1);
                IEM_MC_LOCAL(uint16_t, uTmp2);

                IEM_MC_FETCH_GREG_U16(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U16(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, uTmp1);
                IEM_MC_LOCAL(uint32_t, uTmp2);

                IEM_MC_FETCH_GREG_U32(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U32(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, uTmp1);
                IEM_MC_LOCAL(uint64_t, uTmp2);

                IEM_MC_FETCH_GREG_U64(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U64(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're accessing memory.
         */
        /* Map the memory operand read/write and let the assembly helper swap
           it with the register referenced in place. */
        switch (pIemCpu->enmEffOpSize)
        {
/** @todo the register must be committed separately! */
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint16_t *, pu16Mem, 0);
                IEM_MC_ARG(uint16_t *, pu16Reg, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu16Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u16, pu16Mem, pu16Reg);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Mem, IEM_ACCESS_DATA_RW);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint32_t *, pu32Mem, 0);
                IEM_MC_ARG(uint32_t *, pu32Reg, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu32Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u32, pu32Mem, pu32Reg);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Mem, IEM_ACCESS_DATA_RW);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint64_t *, pu64Mem, 0);
                IEM_MC_ARG(uint64_t *, pu64Reg, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu64Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u64, pu64Mem, pu64Reg);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Mem, IEM_ACCESS_DATA_RW);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
7125
7126
/** Opcode 0x88 - MOV Eb,Gb: store byte register to r/m8. */
FNIEMOP_DEF(iemOp_mov_Eb_Gb)
{
    IEMOP_MNEMONIC("mov Eb,Gb");

    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register to register: fetch from reg, store to r/m. */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're writing a register to memory.
         */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;

}
7165
7166
/**
 * Opcode 0x89 - MOV Ev,Gv.
 *
 * Stores a general-purpose register (reg field) into the r/m operand,
 * which may be a register or a memory location; 16/32/64-bit operand sizes.
 */
FNIEMOP_DEF(iemOp_mov_Ev_Gv)
{
    IEMOP_MNEMONIC("mov Ev,Gv");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register to register copy; one MC block per effective operand size. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're writing a register to memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
7253
7254
/**
 * Opcode 0x8a - MOV Gb,Eb.
 *
 * Loads a byte from the r/m operand (register or memory) into the
 * general-purpose byte register selected by the reg field.
 */
FNIEMOP_DEF(iemOp_mov_Gb_Eb)
{
    IEMOP_MNEMONIC("mov Gb,Eb");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register to register byte copy. */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
7291
7292
/**
 * Opcode 0x8b - MOV Gv,Ev.
 *
 * Loads the r/m operand (register or memory) into the general-purpose
 * register selected by the reg field; 16/32/64-bit operand sizes.
 */
FNIEMOP_DEF(iemOp_mov_Gv_Ev)
{
    IEMOP_MNEMONIC("mov Gv,Ev");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register to register copy; one MC block per effective operand size. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
7379
7380
/**
 * Opcode 0x8c - MOV Ev,Sw.
 *
 * Stores a segment register selector into the r/m operand.  The register
 * form respects the operand size (zero extending for 32/64-bit); the
 * memory form always stores a 16-bit word.
 */
FNIEMOP_DEF(iemOp_mov_Ev_Sw)
{
    IEMOP_MNEMONIC("mov Ev,Sw");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * Check that the destination register exists. The REX.R prefix is ignored.
     */
    uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
    if (   iSegReg > X86_SREG_GS)
        return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     * In that case, the operand size is respected and the upper bits are
     * cleared (starting with some pentium).
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_SREG_ZX_U32(u32Value, iSegReg);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_SREG_ZX_U64(u64Value, iSegReg);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're saving the register to memory.  The access is word sized
         * regardless of operand size prefixes.
         */
#if 0 /* not necessary */
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
#endif
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t,  u16Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
7453
7454
7455
7456
7457/** Opcode 0x8d. */
7458FNIEMOP_DEF(iemOp_lea_Gv_M)
7459{
7460 IEMOP_MNEMONIC("lea Gv,M");
7461 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7462 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7463 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7464 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); /* no register form */
7465
7466 switch (pIemCpu->enmEffOpSize)
7467 {
7468 case IEMMODE_16BIT:
7469 IEM_MC_BEGIN(0, 2);
7470 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
7471 IEM_MC_LOCAL(uint16_t, u16Cast);
7472 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
7473 IEM_MC_ASSIGN_TO_SMALLER(u16Cast, GCPtrEffSrc);
7474 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Cast);
7475 IEM_MC_ADVANCE_RIP();
7476 IEM_MC_END();
7477 return VINF_SUCCESS;
7478
7479 case IEMMODE_32BIT:
7480 IEM_MC_BEGIN(0, 2);
7481 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
7482 IEM_MC_LOCAL(uint32_t, u32Cast);
7483 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
7484 IEM_MC_ASSIGN_TO_SMALLER(u32Cast, GCPtrEffSrc);
7485 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Cast);
7486 IEM_MC_ADVANCE_RIP();
7487 IEM_MC_END();
7488 return VINF_SUCCESS;
7489
7490 case IEMMODE_64BIT:
7491 IEM_MC_BEGIN(0, 1);
7492 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
7493 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
7494 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, GCPtrEffSrc);
7495 IEM_MC_ADVANCE_RIP();
7496 IEM_MC_END();
7497 return VINF_SUCCESS;
7498 }
7499 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
7500}
7501
7502
/**
 * Opcode 0x8e - MOV Sw,Ev.
 *
 * Loads a segment register from the r/m operand.  The access is always
 * 16-bit; loading CS or a non-existent segment register raises \#UD.
 * The actual segment load (descriptor checks etc.) is deferred to
 * iemCImpl_load_SReg.
 */
FNIEMOP_DEF(iemOp_mov_Sw_Ev)
{
    IEMOP_MNEMONIC("mov Sw,Ev");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * The practical operand size is 16-bit.
     */
#if 0 /* not necessary */
    pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
#endif

    /*
     * Check that the destination register exists and can be used with this
     * instruction.  The REX.R prefix is ignored.
     */
    uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
    if (   iSegReg == X86_SREG_CS
        || iSegReg > X86_SREG_GS)
        return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Selector value comes from a general-purpose register. */
        IEM_MC_BEGIN(2, 0);
        IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
        IEM_MC_ARG(uint16_t, u16Value, 1);
        IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
        IEM_MC_END();
    }
    else
    {
        /*
         * We're loading the register from memory.  The access is word sized
         * regardless of operand size prefixes.
         */
        IEM_MC_BEGIN(2, 1);
        IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
        IEM_MC_ARG(uint16_t, u16Value, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
7556
7557
7558/** Opcode 0x8f. */
7559FNIEMOP_DEF(iemOp_pop_Ev)
7560{
7561 /* This bugger is rather annoying as it requires rSP to be updated before
7562 doing the effective address calculations. Will eventually require a
7563 split between the R/M+SIB decoding and the effective address
7564 calculation - which is something that is required for any attempt at
7565 reusing this code for a recompiler. It may also be good to have if we
7566 need to delay #UD exception caused by invalid lock prefixes.
7567
7568 For now, we'll do a mostly safe interpreter-only implementation here. */
7569 /** @todo What's the deal with the 'reg' field and pop Ev? Ignorning it for
7570 * now until tests show it's checked.. */
7571 IEMOP_MNEMONIC("pop Ev");
7572 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7573 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7574
7575 /* Register access is relatively easy and can share code. */
7576 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7577 return FNIEMOP_CALL_1(iemOpCommonPopGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7578
7579 /*
7580 * Memory target.
7581 *
7582 * Intel says that RSP is incremented before it's used in any effective
7583 * address calcuations. This means some serious extra annoyance here since
7584 * we decode and caclulate the effective address in one step and like to
7585 * delay committing registers till everything is done.
7586 *
7587 * So, we'll decode and calculate the effective address twice. This will
7588 * require some recoding if turned into a recompiler.
7589 */
7590 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* The common code does this differently. */
7591
7592#ifndef TST_IEM_CHECK_MC
7593 /* Calc effective address with modified ESP. */
7594 uint8_t const offOpcodeSaved = pIemCpu->offOpcode;
7595 RTGCPTR GCPtrEff;
7596 VBOXSTRICTRC rcStrict;
7597 rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, &GCPtrEff);
7598 if (rcStrict != VINF_SUCCESS)
7599 return rcStrict;
7600 pIemCpu->offOpcode = offOpcodeSaved;
7601
7602 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7603 uint64_t const RspSaved = pCtx->rsp;
7604 switch (pIemCpu->enmEffOpSize)
7605 {
7606 case IEMMODE_16BIT: iemRegAddToRsp(pCtx, 2); break;
7607 case IEMMODE_32BIT: iemRegAddToRsp(pCtx, 4); break;
7608 case IEMMODE_64BIT: iemRegAddToRsp(pCtx, 8); break;
7609 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7610 }
7611 rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, &GCPtrEff);
7612 Assert(rcStrict == VINF_SUCCESS);
7613 pCtx->rsp = RspSaved;
7614
7615 /* Perform the operation - this should be CImpl. */
7616 RTUINT64U TmpRsp;
7617 TmpRsp.u = pCtx->rsp;
7618 switch (pIemCpu->enmEffOpSize)
7619 {
7620 case IEMMODE_16BIT:
7621 {
7622 uint16_t u16Value;
7623 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
7624 if (rcStrict == VINF_SUCCESS)
7625 rcStrict = iemMemStoreDataU16(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u16Value);
7626 break;
7627 }
7628
7629 case IEMMODE_32BIT:
7630 {
7631 uint32_t u32Value;
7632 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
7633 if (rcStrict == VINF_SUCCESS)
7634 rcStrict = iemMemStoreDataU32(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u32Value);
7635 break;
7636 }
7637
7638 case IEMMODE_64BIT:
7639 {
7640 uint64_t u64Value;
7641 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
7642 if (rcStrict == VINF_SUCCESS)
7643 rcStrict = iemMemStoreDataU16(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u64Value);
7644 break;
7645 }
7646
7647 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7648 }
7649 if (rcStrict == VINF_SUCCESS)
7650 {
7651 pCtx->rsp = TmpRsp.u;
7652 iemRegUpdateRip(pIemCpu);
7653 }
7654 return rcStrict;
7655
7656#else
7657 return VERR_IEM_IPE_2;
7658#endif
7659}
7660
7661
/**
 * Common 'xchg reg,rAX' helper.
 *
 * Swaps the given general-purpose register (after applying REX.B) with
 * rAX at the current effective operand size.  Used by opcodes 0x90-0x97.
 *
 * @param   iReg    The register index from the opcode's low three bits.
 */
FNIEMOP_DEF_1(iemOpCommonXchgGRegRax, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    iReg |= pIemCpu->uRexB; /* REX.B extends the register index to r8-r15. */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint16_t, u16Tmp1);
            IEM_MC_LOCAL(uint16_t, u16Tmp2);
            IEM_MC_FETCH_GREG_U16(u16Tmp1, iReg);
            IEM_MC_FETCH_GREG_U16(u16Tmp2, X86_GREG_xAX);
            IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp1);
            IEM_MC_STORE_GREG_U16(iReg, u16Tmp2);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Tmp1);
            IEM_MC_LOCAL(uint32_t, u32Tmp2);
            IEM_MC_FETCH_GREG_U32(u32Tmp1, iReg);
            IEM_MC_FETCH_GREG_U32(u32Tmp2, X86_GREG_xAX);
            IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp1);
            IEM_MC_STORE_GREG_U32(iReg, u32Tmp2);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Tmp1);
            IEM_MC_LOCAL(uint64_t, u64Tmp2);
            IEM_MC_FETCH_GREG_U64(u64Tmp1, iReg);
            IEM_MC_FETCH_GREG_U64(u64Tmp2, X86_GREG_xAX);
            IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp1);
            IEM_MC_STORE_GREG_U64(iReg, u64Tmp2);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
7711
7712
/**
 * Opcode 0x90 - NOP / PAUSE / XCHG r8,rAX.
 *
 * Plain 0x90 is NOP; with a REX.B prefix it encodes 'xchg r8,rAX';
 * with an F3 (repz) prefix it is PAUSE, emulated here as a NOP.
 */
FNIEMOP_DEF(iemOp_nop)
{
    /* R8/R8D and RAX/EAX can be exchanged. */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_B)
    {
        IEMOP_MNEMONIC("xchg r8,rAX");
        return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xAX);
    }

    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
        IEMOP_MNEMONIC("pause");
    else
        IEMOP_MNEMONIC("nop");
    /* Both NOP and PAUSE are emulated as an empty MC block. */
    IEM_MC_BEGIN(0, 0);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
7732
7733
/** Opcode 0x91 - XCHG rCX,rAX; delegates to the common xchg-with-rAX helper. */
FNIEMOP_DEF(iemOp_xchg_eCX_eAX)
{
    IEMOP_MNEMONIC("xchg rCX,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xCX);
}
7740
7741
/** Opcode 0x92 - XCHG rDX,rAX; delegates to the common xchg-with-rAX helper. */
FNIEMOP_DEF(iemOp_xchg_eDX_eAX)
{
    IEMOP_MNEMONIC("xchg rDX,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDX);
}
7748
7749
/** Opcode 0x93 - XCHG rBX,rAX; delegates to the common xchg-with-rAX helper. */
FNIEMOP_DEF(iemOp_xchg_eBX_eAX)
{
    IEMOP_MNEMONIC("xchg rBX,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBX);
}
7756
7757
7758/** Opcode 0x94. */
7759FNIEMOP_DEF(iemOp_xchg_eSP_eAX)
7760{
7761 IEMOP_MNEMONIC("xchg rSX,rAX");
7762 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSP);
7763}
7764
7765
/** Opcode 0x95 - XCHG rBP,rAX; delegates to the common xchg-with-rAX helper. */
FNIEMOP_DEF(iemOp_xchg_eBP_eAX)
{
    IEMOP_MNEMONIC("xchg rBP,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBP);
}
7772
7773
/** Opcode 0x96 - XCHG rSI,rAX; delegates to the common xchg-with-rAX helper. */
FNIEMOP_DEF(iemOp_xchg_eSI_eAX)
{
    IEMOP_MNEMONIC("xchg rSI,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSI);
}
7780
7781
/** Opcode 0x97 - XCHG rDI,rAX; delegates to the common xchg-with-rAX helper. */
FNIEMOP_DEF(iemOp_xchg_eDI_eAX)
{
    IEMOP_MNEMONIC("xchg rDI,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDI);
}
7788
7789
/**
 * Opcode 0x98 - CBW / CWDE / CDQE.
 *
 * Sign extends AL->AX, AX->EAX or EAX->RAX depending on the effective
 * operand size.  Implemented by testing the source's sign bit and either
 * OR-ing in or AND-ing off the upper half of rAX.
 */
FNIEMOP_DEF(iemOp_cbw)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEMOP_MNEMONIC("cbw");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 7) {      /* sign bit of AL */
                IEM_MC_OR_GREG_U16(X86_GREG_xAX, UINT16_C(0xff00));
            } IEM_MC_ELSE() {
                IEM_MC_AND_GREG_U16(X86_GREG_xAX, UINT16_C(0x00ff));
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEMOP_MNEMONIC("cwde");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 15) {     /* sign bit of AX */
                IEM_MC_OR_GREG_U32(X86_GREG_xAX, UINT32_C(0xffff0000));
            } IEM_MC_ELSE() {
                IEM_MC_AND_GREG_U32(X86_GREG_xAX, UINT32_C(0x0000ffff));
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEMOP_MNEMONIC("cdqe");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 31) {     /* sign bit of EAX */
                IEM_MC_OR_GREG_U64(X86_GREG_xAX, UINT64_C(0xffffffff00000000));
            } IEM_MC_ELSE() {
                IEM_MC_AND_GREG_U64(X86_GREG_xAX, UINT64_C(0x00000000ffffffff));
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
7835
7836
/**
 * Opcode 0x99 - CWD / CDQ / CQO.
 *
 * Sign extends rAX into rDX:rAX: rDX is filled with all ones or all zeros
 * depending on the sign bit of AX/EAX/RAX at the effective operand size.
 */
FNIEMOP_DEF(iemOp_cwd)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEMOP_MNEMONIC("cwd");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 15) {     /* sign bit of AX */
                IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xDX, UINT16_C(0xffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEMOP_MNEMONIC("cdq");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 31) {     /* sign bit of EAX */
                IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xDX, UINT32_C(0xffffffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEMOP_MNEMONIC("cqo");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 63) {     /* sign bit of RAX */
                IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xDX, UINT64_C(0xffffffffffffffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
7882
7883
/**
 * Opcode 0x9a - CALL Ap (far call with immediate selector:offset).
 *
 * Invalid in 64-bit mode.  Decodes the 16/32-bit offset and the 16-bit
 * selector from the instruction stream and defers the actual far call
 * to iemCImpl_callf.
 */
FNIEMOP_DEF(iemOp_call_Ap)
{
    IEMOP_MNEMONIC("call Ap");
    IEMOP_HLP_NO_64BIT();

    /* Decode the far pointer address and pass it on to the far call C implementation. */
    uint32_t offSeg;
    if (pIemCpu->enmEffOpSize != IEMMODE_16BIT)
        IEM_OPCODE_GET_NEXT_U32(&offSeg);
    else
        IEM_OPCODE_GET_NEXT_U16_ZX_U32(&offSeg);
    uint16_t uSel;  IEM_OPCODE_GET_NEXT_U16(&uSel);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_callf, uSel, offSeg, pIemCpu->enmEffOpSize);
}
7900
7901
/**
 * Opcode 0x9b - WAIT (aka FWAIT).
 *
 * Checks for pending x87 FPU exceptions (and device-not-available
 * conditions) and otherwise does nothing.
 */
FNIEMOP_DEF(iemOp_wait)
{
    IEMOP_MNEMONIC("wait");
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
7915
7916
/**
 * Opcode 0x9c - PUSHF/PUSHFD/PUSHFQ.
 *
 * Defers to iemCImpl_pushf; uses the 64-bit default operand size rule.
 */
FNIEMOP_DEF(iemOp_pushf_Fv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_pushf, pIemCpu->enmEffOpSize);
}
7924
7925
/**
 * Opcode 0x9d - POPF/POPFD/POPFQ.
 *
 * Defers to iemCImpl_popf; uses the 64-bit default operand size rule.
 */
FNIEMOP_DEF(iemOp_popf_Fv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_popf, pIemCpu->enmEffOpSize);
}
7933
7934
/**
 * Opcode 0x9e - SAHF.
 *
 * Loads SF, ZF, AF, PF and CF from AH into EFLAGS; other flag bits are
 * preserved and the always-one reserved bit (bit 1) is forced set.  In
 * 64-bit mode the instruction is only valid when the AMD LAHF/SAHF
 * CPUID feature bit is present.
 */
FNIEMOP_DEF(iemOp_sahf)
{
    IEMOP_MNEMONIC("sahf");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
        && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
        return IEMOP_RAISE_INVALID_OPCODE();
    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(uint32_t, u32Flags);
    IEM_MC_LOCAL(uint32_t, EFlags);
    IEM_MC_FETCH_EFLAGS(EFlags);
    IEM_MC_FETCH_GREG_U8_ZX_U32(u32Flags, X86_GREG_xSP/*=AH*/); /* high byte of rAX encoded as xSP */
    IEM_MC_AND_LOCAL_U32(u32Flags, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
    IEM_MC_AND_LOCAL_U32(EFlags, UINT32_C(0xffffff00));   /* keep the non-AH flag bits */
    IEM_MC_OR_LOCAL_U32(u32Flags, X86_EFL_1);             /* reserved bit 1 reads as one */
    IEM_MC_OR_2LOCS_U32(EFlags, u32Flags);
    IEM_MC_COMMIT_EFLAGS(EFlags);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
7957
7958
/**
 * Opcode 0x9f - LAHF.
 *
 * Stores the low byte of EFLAGS into AH.  In 64-bit mode the instruction
 * is only valid when the AMD LAHF/SAHF CPUID feature bit is present.
 */
FNIEMOP_DEF(iemOp_lahf)
{
    IEMOP_MNEMONIC("lahf");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
        && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
        return IEMOP_RAISE_INVALID_OPCODE();
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint8_t, u8Flags);
    IEM_MC_FETCH_EFLAGS_U8(u8Flags);
    IEM_MC_STORE_GREG_U8(X86_GREG_xSP/*=AH*/, u8Flags); /* high byte of rAX encoded as xSP */
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
7975
7976
/**
 * Macro used by iemOp_mov_Al_Ob, iemOp_mov_rAX_Ov, iemOp_mov_Ob_AL and
 * iemOp_mov_Ov_rAX to fetch the moffsXX bit of the opcode and fend of lock
 * prefixes.  Will return on failures.
 *
 * The immediate offset is 16, 32 or 64 bits wide depending on the effective
 * address mode; narrower offsets are zero extended to 64 bits.
 *
 * @param a_GCPtrMemOff     The variable to store the offset in.
 */
#define IEMOP_FETCH_MOFFS_XX(a_GCPtrMemOff) \
    do \
    { \
        switch (pIemCpu->enmEffAddrMode) \
        { \
            case IEMMODE_16BIT: \
                IEM_OPCODE_GET_NEXT_U16_ZX_U64(&(a_GCPtrMemOff)); \
                break; \
            case IEMMODE_32BIT: \
                IEM_OPCODE_GET_NEXT_U32_ZX_U64(&(a_GCPtrMemOff)); \
                break; \
            case IEMMODE_64BIT: \
                IEM_OPCODE_GET_NEXT_U64(&(a_GCPtrMemOff)); \
                break; \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
        IEMOP_HLP_NO_LOCK_PREFIX(); \
    } while (0)
8001
8002/** Opcode 0xa0. */
8003FNIEMOP_DEF(iemOp_mov_Al_Ob)
8004{
8005 /*
8006 * Get the offset and fend of lock prefixes.
8007 */
8008 RTGCPTR GCPtrMemOff;
8009 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
8010
8011 /*
8012 * Fetch AL.
8013 */
8014 IEM_MC_BEGIN(0,1);
8015 IEM_MC_LOCAL(uint8_t, u8Tmp);
8016 IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
8017 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
8018 IEM_MC_ADVANCE_RIP();
8019 IEM_MC_END();
8020 return VINF_SUCCESS;
8021}
8022
8023
/**
 * Opcode 0xa1 - MOV rAX,Ov.
 *
 * Loads AX/EAX/RAX from the memory location given by the moffs immediate,
 * according to the effective operand size.
 */
FNIEMOP_DEF(iemOp_mov_rAX_Ov)
{
    /*
     * Get the offset and fend of lock prefixes.
     */
    IEMOP_MNEMONIC("mov rAX,Ov");
    RTGCPTR GCPtrMemOff;
    IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);

    /*
     * Fetch rAX.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint16_t, u16Tmp);
            IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint32_t, u32Tmp);
            IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint64_t, u64Tmp);
            IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
8069
8070
8071/** Opcode 0xa2. */
8072FNIEMOP_DEF(iemOp_mov_Ob_AL)
8073{
8074 /*
8075 * Get the offset and fend of lock prefixes.
8076 */
8077 RTGCPTR GCPtrMemOff;
8078 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
8079
8080 /*
8081 * Store AL.
8082 */
8083 IEM_MC_BEGIN(0,1);
8084 IEM_MC_LOCAL(uint8_t, u8Tmp);
8085 IEM_MC_FETCH_GREG_U8(u8Tmp, X86_GREG_xAX);
8086 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrMemOff, u8Tmp);
8087 IEM_MC_ADVANCE_RIP();
8088 IEM_MC_END();
8089 return VINF_SUCCESS;
8090}
8091
8092
8093/** Opcode 0xa3. */
8094FNIEMOP_DEF(iemOp_mov_Ov_rAX)
8095{
8096 /*
8097 * Get the offset and fend of lock prefixes.
8098 */
8099 RTGCPTR GCPtrMemOff;
8100 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
8101
8102 /*
8103 * Store rAX.
8104 */
8105 switch (pIemCpu->enmEffOpSize)
8106 {
8107 case IEMMODE_16BIT:
8108 IEM_MC_BEGIN(0,1);
8109 IEM_MC_LOCAL(uint16_t, u16Tmp);
8110 IEM_MC_FETCH_GREG_U16(u16Tmp, X86_GREG_xAX);
8111 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrMemOff, u16Tmp);
8112 IEM_MC_ADVANCE_RIP();
8113 IEM_MC_END();
8114 return VINF_SUCCESS;
8115
8116 case IEMMODE_32BIT:
8117 IEM_MC_BEGIN(0,1);
8118 IEM_MC_LOCAL(uint32_t, u32Tmp);
8119 IEM_MC_FETCH_GREG_U32(u32Tmp, X86_GREG_xAX);
8120 IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrMemOff, u32Tmp);
8121 IEM_MC_ADVANCE_RIP();
8122 IEM_MC_END();
8123 return VINF_SUCCESS;
8124
8125 case IEMMODE_64BIT:
8126 IEM_MC_BEGIN(0,1);
8127 IEM_MC_LOCAL(uint64_t, u64Tmp);
8128 IEM_MC_FETCH_GREG_U64(u64Tmp, X86_GREG_xAX);
8129 IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrMemOff, u64Tmp);
8130 IEM_MC_ADVANCE_RIP();
8131 IEM_MC_END();
8132 return VINF_SUCCESS;
8133
8134 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8135 }
8136}
8137
/**
 * Macro used by iemOp_movsb_Xb_Yb and iemOp_movswd_Xv_Yv.
 *
 * Emits one MOVS iteration: read ValBits bits from [seg:xSI], write them to
 * [ES:xDI], then advance (or, when EFLAGS.DF is set, retreat) both index
 * registers by the operand size in bytes, using AddrBits-wide addressing.
 *
 * @param ValBits   Operand size in bits (8/16/32/64).
 * @param AddrBits  Address size in bits (16/32/64).
 */
#define IEM_MOVS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(0, 2); \
        IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
        IEM_MC_LOCAL(RTGCPTR,           uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
        IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END();
8156
/**
 * Opcode 0xa4 - MOVSB.
 *
 * Byte string move from [seg:xSI] to [ES:xDI].  With a REP/REPNE prefix
 * the whole loop is deferred to a C implementation; the single-step case
 * is generated via IEM_MOVS_CASE per address size.
 */
FNIEMOP_DEF(iemOp_movsb_Xb_Yb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep movsb Xb,Yb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("movsb Xb,Yb");

    /*
     * Sharing case implementation with movs[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_MOVS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_MOVS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_MOVS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
8190
8191
/**
 * Opcode 0xa5 - MOVSW/MOVSD/MOVSQ.
 *
 * Word/dword/qword string move from [seg:xSI] to [ES:xDI].  With a
 * REP/REPNE prefix the loop is deferred to a C implementation selected by
 * operand and address size; the single-step cases are generated via
 * IEM_MOVS_CASE.  A 64-bit operand with 16-bit addressing cannot be
 * encoded and asserts.
 */
FNIEMOP_DEF(iemOp_movswd_Xv_Yv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep movs Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                /* no break needed: every inner case above returns */
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("movs Xv,Yv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with movsb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_MOVS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_MOVS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_MOVS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_MOVS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_MOVS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_MOVS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_MOVS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_MOVS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
8274
8275#undef IEM_MOVS_CASE
8276
/** Macro used by iemOp_cmpsb_Xb_Yb and iemOp_cmpswd_Xv_Yv.
 *
 * Emits the microcode for a single (non-repeated) CMPS iteration:
 * fetches the first operand from [iEffSeg:xSI] and the second from
 * [ES:xDI], compares them with iemAImpl_cmp_uN (updating EFLAGS only;
 * uValue1 is referenced through a local so memory is never written),
 * then advances or retreats both xSI and xDI by ValBits/8 bytes
 * according to EFLAGS.DF. */
#define IEM_CMPS_CASE(ValBits, AddrBits) \
    IEM_MC_BEGIN(3, 3); \
    IEM_MC_ARG(uint##ValBits##_t *, puValue1, 0); \
    IEM_MC_ARG(uint##ValBits##_t, uValue2, 1); \
    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
    IEM_MC_LOCAL(uint##ValBits##_t, uValue1); \
    IEM_MC_LOCAL(RTGCPTR, uAddr); \
    \
    IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
    IEM_MC_FETCH_MEM_U##ValBits(uValue1, pIemCpu->iEffSeg, uAddr); \
    IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
    IEM_MC_FETCH_MEM_U##ValBits(uValue2, X86_SREG_ES, uAddr); \
    IEM_MC_REF_LOCAL(puValue1, uValue1); \
    IEM_MC_REF_EFLAGS(pEFlags); \
    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puValue1, uValue2, pEFlags); \
    \
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
    } IEM_MC_ELSE() { \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
    } IEM_MC_ENDIF(); \
    IEM_MC_ADVANCE_RIP(); \
    IEM_MC_END(); \
8303
8304/** Opcode 0xa6. */
8305FNIEMOP_DEF(iemOp_cmpsb_Xb_Yb)
8306{
8307 IEMOP_HLP_NO_LOCK_PREFIX();
8308
8309 /*
8310 * Use the C implementation if a repeat prefix is encountered.
8311 */
8312 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
8313 {
8314 IEMOP_MNEMONIC("repe cmps Xb,Yb");
8315 switch (pIemCpu->enmEffAddrMode)
8316 {
8317 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr16, pIemCpu->iEffSeg);
8318 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr32, pIemCpu->iEffSeg);
8319 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr64, pIemCpu->iEffSeg);
8320 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8321 }
8322 }
8323 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
8324 {
8325 IEMOP_MNEMONIC("repe cmps Xb,Yb");
8326 switch (pIemCpu->enmEffAddrMode)
8327 {
8328 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr16, pIemCpu->iEffSeg);
8329 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr32, pIemCpu->iEffSeg);
8330 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr64, pIemCpu->iEffSeg);
8331 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8332 }
8333 }
8334 IEMOP_MNEMONIC("cmps Xb,Yb");
8335
8336 /*
8337 * Sharing case implementation with cmps[wdq] below.
8338 */
8339 switch (pIemCpu->enmEffAddrMode)
8340 {
8341 case IEMMODE_16BIT: IEM_CMPS_CASE(8, 16); break;
8342 case IEMMODE_32BIT: IEM_CMPS_CASE(8, 32); break;
8343 case IEMMODE_64BIT: IEM_CMPS_CASE(8, 64); break;
8344 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8345 }
8346 return VINF_SUCCESS;
8347
8348}
8349
8350
/** Opcode 0xa7. CMPSW/CMPSD/CMPSQ - compare [seg:xSI] with [ES:xDI]. */
FNIEMOP_DEF(iemOp_cmpswd_Xv_Yv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     * REPE (repeat while equal) and REPNE (repeat while not equal)
     * dispatch to distinct workers.
     */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
    {
        IEMOP_MNEMONIC("repe cmps Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                /* NOTE(review): no break - harmless, every case above returns. */
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    /* 16-bit addressing cannot be encoded with a 64-bit operand size. */
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }

    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
    {
        IEMOP_MNEMONIC("repne cmps Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                /* NOTE(review): no break - harmless, every case above returns. */
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    /* 16-bit addressing cannot be encoded with a 64-bit operand size. */
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }

    IEMOP_MNEMONIC("cmps Xv,Yv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with cmpsb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_CMPS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_CMPS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;

}
8469
8470#undef IEM_CMPS_CASE
8471
/** Opcode 0xa8 - test AL, Ib.
 * AF is architecturally undefined after TEST, hence the verification
 * exclusion. */
FNIEMOP_DEF(iemOp_test_AL_Ib)
{
    IEMOP_MNEMONIC("test al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_test);
}
8479
8480
/** Opcode 0xa9 - test rAX, Iz (operand size selects AX/EAX/RAX).
 * AF is architecturally undefined after TEST, hence the verification
 * exclusion. */
FNIEMOP_DEF(iemOp_test_eAX_Iz)
{
    IEMOP_MNEMONIC("test rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_test);
}
8488
8489
/** Macro used by iemOp_stosb_Yb_AL and iemOp_stoswd_Yv_eAX.
 *
 * Emits the microcode for a single (non-repeated) STOS iteration:
 * stores AL/AX/EAX/RAX to [ES:xDI], then advances or retreats xDI by
 * ValBits/8 bytes according to EFLAGS.DF. */
#define IEM_STOS_CASE(ValBits, AddrBits) \
    IEM_MC_BEGIN(0, 2); \
    IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
    IEM_MC_LOCAL(RTGCPTR, uAddr); \
    IEM_MC_FETCH_GREG_U##ValBits(uValue, X86_GREG_xAX); \
    IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
    IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
    } IEM_MC_ELSE() { \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
    } IEM_MC_ENDIF(); \
    IEM_MC_ADVANCE_RIP(); \
    IEM_MC_END(); \
8505
/** Opcode 0xaa. STOSB - store AL to [ES:xDI]. */
FNIEMOP_DEF(iemOp_stosb_Yb_AL)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     * STOS treats REPNZ and REPZ identically (plain REP).
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep stos Yb,al");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("stos Yb,al");

    /*
     * Sharing case implementation with stos[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_STOS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_STOS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_STOS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
8539
8540
/** Opcode 0xab. STOSW/STOSD/STOSQ - store AX/EAX/RAX to [ES:xDI]. */
FNIEMOP_DEF(iemOp_stoswd_Yv_eAX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     * STOS treats REPNZ and REPZ identically (plain REP).
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep stos Yv,rAX");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                /* NOTE(review): no break - harmless, every case above returns. */
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    /* 16-bit addressing cannot be encoded with a 64-bit operand size. */
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("stos Yv,rAX");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with stosb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_STOS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_STOS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_STOS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_STOS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_STOS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_STOS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_STOS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_STOS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
8623
8624#undef IEM_STOS_CASE
8625
/** Macro used by iemOp_lodsb_AL_Xb and iemOp_lodswd_eAX_Xv.
 *
 * Emits the microcode for a single (non-repeated) LODS iteration:
 * loads AL/AX/EAX/RAX from [iEffSeg:xSI], then advances or retreats
 * xSI by ValBits/8 bytes according to EFLAGS.DF. */
#define IEM_LODS_CASE(ValBits, AddrBits) \
    IEM_MC_BEGIN(0, 2); \
    IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
    IEM_MC_LOCAL(RTGCPTR, uAddr); \
    IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
    IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
    IEM_MC_STORE_GREG_U##ValBits(X86_GREG_xAX, uValue); \
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
    } IEM_MC_ELSE() { \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
    } IEM_MC_ENDIF(); \
    IEM_MC_ADVANCE_RIP(); \
    IEM_MC_END();
8641
/** Opcode 0xac. LODSB - load AL from [seg:xSI]. */
FNIEMOP_DEF(iemOp_lodsb_AL_Xb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     * LODS treats REPNZ and REPZ identically (plain REP).
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep lodsb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("lodsb al,Xb");

    /*
     * Sharing case implementation with lods[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_LODS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_LODS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_LODS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
8675
8676
/** Opcode 0xad. LODSW/LODSD/LODSQ - load AX/EAX/RAX from [seg:xSI]. */
FNIEMOP_DEF(iemOp_lodswd_eAX_Xv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     * LODS treats REPNZ and REPZ identically (plain REP).
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep lods rAX,Xv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                /* NOTE(review): no break - harmless, every case above returns. */
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    /* 16-bit addressing cannot be encoded with a 64-bit operand size. */
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("lods rAX,Xv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with lodsb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_LODS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_LODS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_LODS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_LODS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_LODS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_LODS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_LODS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_LODS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
8759
8760#undef IEM_LODS_CASE
8761
/** Macro used by iemOp_scasb_AL_Xb and iemOp_scaswd_eAX_Xv.
 *
 * Emits the microcode for a single (non-repeated) SCAS iteration:
 * compares AL/AX/EAX/RAX (first operand, referenced in the register
 * file) against the value at [ES:xDI] via iemAImpl_cmp_uN (EFLAGS only,
 * the accumulator is not written), then advances or retreats xDI by
 * ValBits/8 bytes according to EFLAGS.DF. */
#define IEM_SCAS_CASE(ValBits, AddrBits) \
    IEM_MC_BEGIN(3, 2); \
    IEM_MC_ARG(uint##ValBits##_t *, puRax, 0); \
    IEM_MC_ARG(uint##ValBits##_t, uValue, 1); \
    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
    IEM_MC_LOCAL(RTGCPTR, uAddr); \
    \
    IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
    IEM_MC_FETCH_MEM_U##ValBits(uValue, X86_SREG_ES, uAddr); \
    IEM_MC_REF_GREG_U##ValBits(puRax, X86_GREG_xAX); \
    IEM_MC_REF_EFLAGS(pEFlags); \
    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puRax, uValue, pEFlags); \
    \
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
    } IEM_MC_ELSE() { \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
    } IEM_MC_ENDIF(); \
    IEM_MC_ADVANCE_RIP(); \
    IEM_MC_END();
8783
/** Opcode 0xae. SCASB - compare AL with [ES:xDI], byte. */
FNIEMOP_DEF(iemOp_scasb_AL_Xb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     * SCAS distinguishes REPE (repeat while equal) from REPNE (repeat
     * while not equal).
     */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
    {
        IEMOP_MNEMONIC("repe scasb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
    {
        IEMOP_MNEMONIC("repne scasb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("scasb al,Xb");

    /*
     * Sharing case implementation with scas[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_SCAS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_SCAS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_SCAS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
8828
8829
8830/** Opcode 0xaf. */
8831FNIEMOP_DEF(iemOp_scaswd_eAX_Xv)
8832{
8833 IEMOP_HLP_NO_LOCK_PREFIX();
8834
8835 /*
8836 * Use the C implementation if a repeat prefix is encountered.
8837 */
8838 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
8839 {
8840 IEMOP_MNEMONIC("repe scas rAX,Xv");
8841 switch (pIemCpu->enmEffOpSize)
8842 {
8843 case IEMMODE_16BIT:
8844 switch (pIemCpu->enmEffAddrMode)
8845 {
8846 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m16);
8847 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m32);
8848 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m64);
8849 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8850 }
8851 break;
8852 case IEMMODE_32BIT:
8853 switch (pIemCpu->enmEffAddrMode)
8854 {
8855 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m16);
8856 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m32);
8857 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m64);
8858 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8859 }
8860 case IEMMODE_64BIT:
8861 switch (pIemCpu->enmEffAddrMode)
8862 {
8863 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /** @todo It's this wrong, we can do 16-bit addressing in 64-bit mode, but not 32-bit. right? */
8864 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m32);
8865 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m64);
8866 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8867 }
8868 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8869 }
8870 }
8871 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
8872 {
8873 IEMOP_MNEMONIC("repne scas rAX,Xv");
8874 switch (pIemCpu->enmEffOpSize)
8875 {
8876 case IEMMODE_16BIT:
8877 switch (pIemCpu->enmEffAddrMode)
8878 {
8879 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m16);
8880 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m32);
8881 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m64);
8882 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8883 }
8884 break;
8885 case IEMMODE_32BIT:
8886 switch (pIemCpu->enmEffAddrMode)
8887 {
8888 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m16);
8889 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m32);
8890 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m64);
8891 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8892 }
8893 case IEMMODE_64BIT:
8894 switch (pIemCpu->enmEffAddrMode)
8895 {
8896 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
8897 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m32);
8898 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m64);
8899 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8900 }
8901 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8902 }
8903 }
8904 IEMOP_MNEMONIC("scas rAX,Xv");
8905
8906 /*
8907 * Annoying double switch here.
8908 * Using ugly macro for implementing the cases, sharing it with scasb.
8909 */
8910 switch (pIemCpu->enmEffOpSize)
8911 {
8912 case IEMMODE_16BIT:
8913 switch (pIemCpu->enmEffAddrMode)
8914 {
8915 case IEMMODE_16BIT: IEM_SCAS_CASE(16, 16); break;
8916 case IEMMODE_32BIT: IEM_SCAS_CASE(16, 32); break;
8917 case IEMMODE_64BIT: IEM_SCAS_CASE(16, 64); break;
8918 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8919 }
8920 break;
8921
8922 case IEMMODE_32BIT:
8923 switch (pIemCpu->enmEffAddrMode)
8924 {
8925 case IEMMODE_16BIT: IEM_SCAS_CASE(32, 16); break;
8926 case IEMMODE_32BIT: IEM_SCAS_CASE(32, 32); break;
8927 case IEMMODE_64BIT: IEM_SCAS_CASE(32, 64); break;
8928 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8929 }
8930 break;
8931
8932 case IEMMODE_64BIT:
8933 switch (pIemCpu->enmEffAddrMode)
8934 {
8935 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
8936 case IEMMODE_32BIT: IEM_SCAS_CASE(64, 32); break;
8937 case IEMMODE_64BIT: IEM_SCAS_CASE(64, 64); break;
8938 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8939 }
8940 break;
8941 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8942 }
8943 return VINF_SUCCESS;
8944}
8945
8946#undef IEM_SCAS_CASE
8947
/**
 * Common 'mov r8, imm8' helper.
 *
 * Fetches the imm8 operand and stores it into the given 8-bit register.
 *
 * @param iReg  The register index to store to (the 0xb0..0xb7 wrappers pass
 *              X86_GREG_xAX..X86_GREG_xDI; presumably IEM_MC_STORE_GREG_U8
 *              maps indices 4-7 to AH/CH/DH/BH when no REX prefix is
 *              present - TODO confirm against its definition).
 */
FNIEMOP_DEF_1(iemOpCommonMov_r8_Ib, uint8_t, iReg)
{
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL_CONST(uint8_t, u8Value,/*=*/ u8Imm);
    IEM_MC_STORE_GREG_U8(iReg, u8Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();

    return VINF_SUCCESS;
}
8964
8965
/** Opcode 0xb0 - mov AL, Ib. */
FNIEMOP_DEF(iemOp_mov_AL_Ib)
{
    IEMOP_MNEMONIC("mov AL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xAX);
}
8972
8973
/** Opcode 0xb1 - mov CL, Ib.
 * NOTE(review): function name lacks the mov_ prefix used by 0xb0/0xb4;
 * not renamed since the opcode table references it. */
FNIEMOP_DEF(iemOp_CL_Ib)
{
    IEMOP_MNEMONIC("mov CL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xCX);
}
8980
8981
/** Opcode 0xb2 - mov DL, Ib. */
FNIEMOP_DEF(iemOp_DL_Ib)
{
    IEMOP_MNEMONIC("mov DL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDX);
}
8988
8989
/** Opcode 0xb3 - mov BL, Ib. */
FNIEMOP_DEF(iemOp_BL_Ib)
{
    IEMOP_MNEMONIC("mov BL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBX);
}
8996
8997
/** Opcode 0xb4 - mov AH, Ib.
 * X86_GREG_xSP is deliberate: without REX, 8-bit register index 4
 * encodes AH, not SPL. */
FNIEMOP_DEF(iemOp_mov_AH_Ib)
{
    IEMOP_MNEMONIC("mov AH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSP);
}
9004
9005
/** Opcode 0xb5 - mov CH, Ib.
 * X86_GREG_xBP is deliberate: without REX, 8-bit register index 5
 * encodes CH, not BPL. */
FNIEMOP_DEF(iemOp_CH_Ib)
{
    IEMOP_MNEMONIC("mov CH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBP);
}
9012
9013
/** Opcode 0xb6 - mov DH, Ib.
 * X86_GREG_xSI is deliberate: without REX, 8-bit register index 6
 * encodes DH, not SIL. */
FNIEMOP_DEF(iemOp_DH_Ib)
{
    IEMOP_MNEMONIC("mov DH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSI);
}
9020
9021
/** Opcode 0xb7 - mov BH, Ib.
 * X86_GREG_xDI is deliberate: without REX, 8-bit register index 7
 * encodes BH, not DIL. */
FNIEMOP_DEF(iemOp_BH_Ib)
{
    IEMOP_MNEMONIC("mov BH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDI);
}
9028
9029
9030/**
9031 * Common 'mov regX,immX' helper.
9032 */
9033FNIEMOP_DEF_1(iemOpCommonMov_Rv_Iv, uint8_t, iReg)
9034{
9035 switch (pIemCpu->enmEffOpSize)
9036 {
9037 case IEMMODE_16BIT:
9038 {
9039 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9040 IEMOP_HLP_NO_LOCK_PREFIX();
9041
9042 IEM_MC_BEGIN(0, 1);
9043 IEM_MC_LOCAL_CONST(uint16_t, u16Value,/*=*/ u16Imm);
9044 IEM_MC_STORE_GREG_U16(iReg, u16Value);
9045 IEM_MC_ADVANCE_RIP();
9046 IEM_MC_END();
9047 break;
9048 }
9049
9050 case IEMMODE_32BIT:
9051 {
9052 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
9053 IEMOP_HLP_NO_LOCK_PREFIX();
9054
9055 IEM_MC_BEGIN(0, 1);
9056 IEM_MC_LOCAL_CONST(uint32_t, u32Value,/*=*/ u32Imm);
9057 IEM_MC_STORE_GREG_U32(iReg, u32Value);
9058 IEM_MC_ADVANCE_RIP();
9059 IEM_MC_END();
9060 break;
9061 }
9062 case IEMMODE_64BIT:
9063 {
9064 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm);
9065 IEMOP_HLP_NO_LOCK_PREFIX();
9066
9067 IEM_MC_BEGIN(0, 1);
9068 IEM_MC_LOCAL_CONST(uint64_t, u64Value,/*=*/ u64Imm);
9069 IEM_MC_STORE_GREG_U64(iReg, u64Value);
9070 IEM_MC_ADVANCE_RIP();
9071 IEM_MC_END();
9072 break;
9073 }
9074 }
9075
9076 return VINF_SUCCESS;
9077}
9078
9079
/** Opcode 0xb8 - mov rAX, Iv. */
FNIEMOP_DEF(iemOp_eAX_Iv)
{
    IEMOP_MNEMONIC("mov rAX,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xAX);
}
9086
9087
/** Opcode 0xb9 - mov rCX, Iv. */
FNIEMOP_DEF(iemOp_eCX_Iv)
{
    IEMOP_MNEMONIC("mov rCX,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xCX);
}
9094
9095
/** Opcode 0xba - mov rDX, Iv. */
FNIEMOP_DEF(iemOp_eDX_Iv)
{
    IEMOP_MNEMONIC("mov rDX,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDX);
}
9102
9103
/** Opcode 0xbb - mov rBX, Iv. */
FNIEMOP_DEF(iemOp_eBX_Iv)
{
    IEMOP_MNEMONIC("mov rBX,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBX);
}
9110
9111
/** Opcode 0xbc - mov rSP, Iv. */
FNIEMOP_DEF(iemOp_eSP_Iv)
{
    IEMOP_MNEMONIC("mov rSP,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSP);
}
9118
9119
/** Opcode 0xbd - mov rBP, Iv. */
FNIEMOP_DEF(iemOp_eBP_Iv)
{
    IEMOP_MNEMONIC("mov rBP,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBP);
}
9126
9127
/** Opcode 0xbe - mov rSI, Iv. */
FNIEMOP_DEF(iemOp_eSI_Iv)
{
    IEMOP_MNEMONIC("mov rSI,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSI);
}
9134
9135
/** Opcode 0xbf - mov rDI, Iv. */
FNIEMOP_DEF(iemOp_eDI_Iv)
{
    IEMOP_MNEMONIC("mov rDI,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDI);
}
9142
9143
/** Opcode 0xc0. Group 2: rol/ror/rcl/rcr/shl/shr/sar Eb, Ib.
 * The ModR/M reg field selects the shift/rotate operation. */
FNIEMOP_DEF(iemOp_Grp2_Eb_Ib)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,Ib"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,Ib"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,Ib"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,Ib"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,Ib"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,Ib"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,Ib"); break;
        /* NOTE(review): /6 is an undefined group-2 encoding; raising an
           invalid-lock-prefix error here looks like the wrong exception
           (invalid opcode expected) - confirm. */
        case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
    }
    /* OF and AF are architecturally undefined for some counts/operations. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t, cShiftArg, 1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        /* The imm8 shift count follows the ModR/M displacement bytes,
           so it is fetched after the effective address calculation. */
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEM_MC_ASSIGN(cShiftArg, cShift);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
9202
9203
9204/** Opcode 0xc1. */
9205FNIEMOP_DEF(iemOp_Grp2_Ev_Ib)
9206{
9207 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9208 PCIEMOPSHIFTSIZES pImpl;
9209 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9210 {
9211 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,Ib"); break;
9212 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,Ib"); break;
9213 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,Ib"); break;
9214 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,Ib"); break;
9215 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,Ib"); break;
9216 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,Ib"); break;
9217 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,Ib"); break;
9218 case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
9219 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
9220 }
9221 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9222
9223 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9224 {
9225 /* register */
9226 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9227 IEMOP_HLP_NO_LOCK_PREFIX();
9228 switch (pIemCpu->enmEffOpSize)
9229 {
9230 case IEMMODE_16BIT:
9231 IEM_MC_BEGIN(3, 0);
9232 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9233 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
9234 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9235 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9236 IEM_MC_REF_EFLAGS(pEFlags);
9237 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9238 IEM_MC_ADVANCE_RIP();
9239 IEM_MC_END();
9240 return VINF_SUCCESS;
9241
9242 case IEMMODE_32BIT:
9243 IEM_MC_BEGIN(3, 0);
9244 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9245 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
9246 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9247 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9248 IEM_MC_REF_EFLAGS(pEFlags);
9249 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9250 IEM_MC_ADVANCE_RIP();
9251 IEM_MC_END();
9252 return VINF_SUCCESS;
9253
9254 case IEMMODE_64BIT:
9255 IEM_MC_BEGIN(3, 0);
9256 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9257 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
9258 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9259 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9260 IEM_MC_REF_EFLAGS(pEFlags);
9261 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9262 IEM_MC_ADVANCE_RIP();
9263 IEM_MC_END();
9264 return VINF_SUCCESS;
9265
9266 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9267 }
9268 }
9269 else
9270 {
9271 /* memory */
9272 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9273 switch (pIemCpu->enmEffOpSize)
9274 {
9275 case IEMMODE_16BIT:
9276 IEM_MC_BEGIN(3, 2);
9277 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9278 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9279 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9280 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9281
9282 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9283 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9284 IEM_MC_ASSIGN(cShiftArg, cShift);
9285 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9286 IEM_MC_FETCH_EFLAGS(EFlags);
9287 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9288
9289 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
9290 IEM_MC_COMMIT_EFLAGS(EFlags);
9291 IEM_MC_ADVANCE_RIP();
9292 IEM_MC_END();
9293 return VINF_SUCCESS;
9294
9295 case IEMMODE_32BIT:
9296 IEM_MC_BEGIN(3, 2);
9297 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9298 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9299 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9300 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9301
9302 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9303 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9304 IEM_MC_ASSIGN(cShiftArg, cShift);
9305 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9306 IEM_MC_FETCH_EFLAGS(EFlags);
9307 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9308
9309 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
9310 IEM_MC_COMMIT_EFLAGS(EFlags);
9311 IEM_MC_ADVANCE_RIP();
9312 IEM_MC_END();
9313 return VINF_SUCCESS;
9314
9315 case IEMMODE_64BIT:
9316 IEM_MC_BEGIN(3, 2);
9317 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9318 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9319 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9320 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9321
9322 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9323 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9324 IEM_MC_ASSIGN(cShiftArg, cShift);
9325 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9326 IEM_MC_FETCH_EFLAGS(EFlags);
9327 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9328
9329 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
9330 IEM_MC_COMMIT_EFLAGS(EFlags);
9331 IEM_MC_ADVANCE_RIP();
9332 IEM_MC_END();
9333 return VINF_SUCCESS;
9334
9335 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9336 }
9337 }
9338}
9339
9340
/** Opcode 0xc2. */
FNIEMOP_DEF(iemOp_retn_Iw)
{
    IEMOP_MNEMONIC("retn Iw");
    /* Immediate = number of extra bytes to pop off the stack after the return. */
    uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* near return defaults to 64-bit operand size in long mode */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, u16Imm);
}
9350
9351
/** Opcode 0xc3. */
FNIEMOP_DEF(iemOp_retn)
{
    IEMOP_MNEMONIC("retn");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* near return defaults to 64-bit operand size in long mode */
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Same C implementation as 0xc2, just with a zero byte-pop count. */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, 0);
}
9360
9361
/** Opcode 0xc4. */
FNIEMOP_DEF(iemOp_les_Gv_Mp)
{
    IEMOP_MNEMONIC("les Gv,Mp");
    /* Shared worker loads the far pointer into ES and the general register. */
    return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_ES);
}
9368
9369
/** Opcode 0xc5. */
FNIEMOP_DEF(iemOp_lds_Gv_Mp)
{
    IEMOP_MNEMONIC("lds Gv,Mp");
    /* Shared worker loads the far pointer into DS and the general register. */
    return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_DS);
}
9376
9377
9378/** Opcode 0xc6. */
9379FNIEMOP_DEF(iemOp_Grp11_Eb_Ib)
9380{
9381 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9382 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9383 if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Eb,Ib in this group. */
9384 return IEMOP_RAISE_INVALID_LOCK_PREFIX();
9385 IEMOP_MNEMONIC("mov Eb,Ib");
9386
9387 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9388 {
9389 /* register access */
9390 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9391 IEM_MC_BEGIN(0, 0);
9392 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Imm);
9393 IEM_MC_ADVANCE_RIP();
9394 IEM_MC_END();
9395 }
9396 else
9397 {
9398 /* memory access. */
9399 IEM_MC_BEGIN(0, 1);
9400 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9401 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9402 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9403 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Imm);
9404 IEM_MC_ADVANCE_RIP();
9405 IEM_MC_END();
9406 }
9407 return VINF_SUCCESS;
9408}
9409
9410
9411/** Opcode 0xc7. */
9412FNIEMOP_DEF(iemOp_Grp11_Ev_Iz)
9413{
9414 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9415 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9416 if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Eb,Ib in this group. */
9417 return IEMOP_RAISE_INVALID_LOCK_PREFIX();
9418 IEMOP_MNEMONIC("mov Ev,Iz");
9419
9420 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9421 {
9422 /* register access */
9423 switch (pIemCpu->enmEffOpSize)
9424 {
9425 case IEMMODE_16BIT:
9426 IEM_MC_BEGIN(0, 0);
9427 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9428 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Imm);
9429 IEM_MC_ADVANCE_RIP();
9430 IEM_MC_END();
9431 return VINF_SUCCESS;
9432
9433 case IEMMODE_32BIT:
9434 IEM_MC_BEGIN(0, 0);
9435 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
9436 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Imm);
9437 IEM_MC_ADVANCE_RIP();
9438 IEM_MC_END();
9439 return VINF_SUCCESS;
9440
9441 case IEMMODE_64BIT:
9442 IEM_MC_BEGIN(0, 0);
9443 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm);
9444 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Imm);
9445 IEM_MC_ADVANCE_RIP();
9446 IEM_MC_END();
9447 return VINF_SUCCESS;
9448
9449 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9450 }
9451 }
9452 else
9453 {
9454 /* memory access. */
9455 switch (pIemCpu->enmEffOpSize)
9456 {
9457 case IEMMODE_16BIT:
9458 IEM_MC_BEGIN(0, 1);
9459 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9460 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9461 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9462 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Imm);
9463 IEM_MC_ADVANCE_RIP();
9464 IEM_MC_END();
9465 return VINF_SUCCESS;
9466
9467 case IEMMODE_32BIT:
9468 IEM_MC_BEGIN(0, 1);
9469 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9470 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9471 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
9472 IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Imm);
9473 IEM_MC_ADVANCE_RIP();
9474 IEM_MC_END();
9475 return VINF_SUCCESS;
9476
9477 case IEMMODE_64BIT:
9478 IEM_MC_BEGIN(0, 1);
9479 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9480 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9481 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm);
9482 IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Imm);
9483 IEM_MC_ADVANCE_RIP();
9484 IEM_MC_END();
9485 return VINF_SUCCESS;
9486
9487 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9488 }
9489 }
9490}
9491
9492
9493
9494
/** Opcode 0xc8. */
FNIEMOP_STUB(iemOp_enter_Iw_Ib); /* not implemented yet */
9497
9498
9499/** Opcode 0xc9. */
9500FNIEMOP_DEF(iemOp_leave)
9501{
9502 IEMOP_MNEMONIC("retn");
9503 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9504 IEMOP_HLP_NO_LOCK_PREFIX();
9505 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_leave, pIemCpu->enmEffOpSize);
9506}
9507
9508
/** Opcode 0xca. */
FNIEMOP_DEF(iemOp_retf_Iw)
{
    IEMOP_MNEMONIC("retf Iw");
    /* Immediate = number of extra bytes to pop off the stack after the far return. */
    uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, u16Imm);
}
9518
9519
/** Opcode 0xcb. */
FNIEMOP_DEF(iemOp_retf)
{
    IEMOP_MNEMONIC("retf");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* Same C implementation as 0xca, just with a zero byte-pop count. */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, 0);
}
9528
9529
/** Opcode 0xcc. */
FNIEMOP_DEF(iemOp_int_3)
{
    /* int3 breakpoint instruction; fIsBpInstr=true so the \#BP-specific handling applies. */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, X86_XCPT_BP, true /*fIsBpInstr*/);
}
9535
9536
/** Opcode 0xcd. */
FNIEMOP_DEF(iemOp_int_Ib)
{
    /* Software interrupt with the vector taken from the immediate byte. */
    uint8_t u8Int; IEM_OPCODE_GET_NEXT_U8(&u8Int);
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, u8Int, false /*fIsBpInstr*/);
}
9543
9544
9545/** Opcode 0xce. */
9546FNIEMOP_DEF(iemOp_into)
9547{
9548 IEM_MC_BEGIN(2, 0);
9549 IEM_MC_ARG_CONST(uint8_t, u8Int, /*=*/ X86_XCPT_OF, 0);
9550 IEM_MC_ARG_CONST(bool, fIsBpInstr, /*=*/ false, 1);
9551 IEM_MC_CALL_CIMPL_2(iemCImpl_int, u8Int, fIsBpInstr);
9552 IEM_MC_END();
9553 return VINF_SUCCESS;
9554}
9555
9556
/** Opcode 0xcf. */
FNIEMOP_DEF(iemOp_iret)
{
    IEMOP_MNEMONIC("iret");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* The heavy lifting (mode-dependent stack frame) is done by the C implementation. */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_iret, pIemCpu->enmEffOpSize);
}
9564
9565
9566/** Opcode 0xd0. */
9567FNIEMOP_DEF(iemOp_Grp2_Eb_1)
9568{
9569 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9570 PCIEMOPSHIFTSIZES pImpl;
9571 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9572 {
9573 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,1"); break;
9574 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,1"); break;
9575 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,1"); break;
9576 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,1"); break;
9577 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,1"); break;
9578 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,1"); break;
9579 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,1"); break;
9580 case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
9581 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
9582 }
9583 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9584
9585 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9586 {
9587 /* register */
9588 IEMOP_HLP_NO_LOCK_PREFIX();
9589 IEM_MC_BEGIN(3, 0);
9590 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9591 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=*/1, 1);
9592 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9593 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9594 IEM_MC_REF_EFLAGS(pEFlags);
9595 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9596 IEM_MC_ADVANCE_RIP();
9597 IEM_MC_END();
9598 }
9599 else
9600 {
9601 /* memory */
9602 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9603 IEM_MC_BEGIN(3, 2);
9604 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9605 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=*/1, 1);
9606 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9607 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9608
9609 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9610 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9611 IEM_MC_FETCH_EFLAGS(EFlags);
9612 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9613
9614 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
9615 IEM_MC_COMMIT_EFLAGS(EFlags);
9616 IEM_MC_ADVANCE_RIP();
9617 IEM_MC_END();
9618 }
9619 return VINF_SUCCESS;
9620}
9621
9622
9623
9624/** Opcode 0xd1. */
9625FNIEMOP_DEF(iemOp_Grp2_Ev_1)
9626{
9627 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9628 PCIEMOPSHIFTSIZES pImpl;
9629 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9630 {
9631 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,1"); break;
9632 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,1"); break;
9633 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,1"); break;
9634 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,1"); break;
9635 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,1"); break;
9636 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,1"); break;
9637 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,1"); break;
9638 case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
9639 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
9640 }
9641 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9642
9643 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9644 {
9645 /* register */
9646 IEMOP_HLP_NO_LOCK_PREFIX();
9647 switch (pIemCpu->enmEffOpSize)
9648 {
9649 case IEMMODE_16BIT:
9650 IEM_MC_BEGIN(3, 0);
9651 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9652 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9653 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9654 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9655 IEM_MC_REF_EFLAGS(pEFlags);
9656 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9657 IEM_MC_ADVANCE_RIP();
9658 IEM_MC_END();
9659 return VINF_SUCCESS;
9660
9661 case IEMMODE_32BIT:
9662 IEM_MC_BEGIN(3, 0);
9663 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9664 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9665 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9666 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9667 IEM_MC_REF_EFLAGS(pEFlags);
9668 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9669 IEM_MC_ADVANCE_RIP();
9670 IEM_MC_END();
9671 return VINF_SUCCESS;
9672
9673 case IEMMODE_64BIT:
9674 IEM_MC_BEGIN(3, 0);
9675 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9676 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9677 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9678 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9679 IEM_MC_REF_EFLAGS(pEFlags);
9680 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9681 IEM_MC_ADVANCE_RIP();
9682 IEM_MC_END();
9683 return VINF_SUCCESS;
9684
9685 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9686 }
9687 }
9688 else
9689 {
9690 /* memory */
9691 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9692 switch (pIemCpu->enmEffOpSize)
9693 {
9694 case IEMMODE_16BIT:
9695 IEM_MC_BEGIN(3, 2);
9696 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9697 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9698 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9699 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9700
9701 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9702 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9703 IEM_MC_FETCH_EFLAGS(EFlags);
9704 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9705
9706 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
9707 IEM_MC_COMMIT_EFLAGS(EFlags);
9708 IEM_MC_ADVANCE_RIP();
9709 IEM_MC_END();
9710 return VINF_SUCCESS;
9711
9712 case IEMMODE_32BIT:
9713 IEM_MC_BEGIN(3, 2);
9714 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9715 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9716 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9717 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9718
9719 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9720 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9721 IEM_MC_FETCH_EFLAGS(EFlags);
9722 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9723
9724 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
9725 IEM_MC_COMMIT_EFLAGS(EFlags);
9726 IEM_MC_ADVANCE_RIP();
9727 IEM_MC_END();
9728 return VINF_SUCCESS;
9729
9730 case IEMMODE_64BIT:
9731 IEM_MC_BEGIN(3, 2);
9732 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9733 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9734 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9735 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9736
9737 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9738 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9739 IEM_MC_FETCH_EFLAGS(EFlags);
9740 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9741
9742 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
9743 IEM_MC_COMMIT_EFLAGS(EFlags);
9744 IEM_MC_ADVANCE_RIP();
9745 IEM_MC_END();
9746 return VINF_SUCCESS;
9747
9748 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9749 }
9750 }
9751}
9752
9753
9754/** Opcode 0xd2. */
9755FNIEMOP_DEF(iemOp_Grp2_Eb_CL)
9756{
9757 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9758 PCIEMOPSHIFTSIZES pImpl;
9759 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9760 {
9761 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,CL"); break;
9762 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,CL"); break;
9763 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,CL"); break;
9764 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,CL"); break;
9765 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,CL"); break;
9766 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,CL"); break;
9767 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,CL"); break;
9768 case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
9769 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc, grr. */
9770 }
9771 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9772
9773 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9774 {
9775 /* register */
9776 IEMOP_HLP_NO_LOCK_PREFIX();
9777 IEM_MC_BEGIN(3, 0);
9778 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9779 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9780 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9781 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9782 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9783 IEM_MC_REF_EFLAGS(pEFlags);
9784 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9785 IEM_MC_ADVANCE_RIP();
9786 IEM_MC_END();
9787 }
9788 else
9789 {
9790 /* memory */
9791 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9792 IEM_MC_BEGIN(3, 2);
9793 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9794 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9795 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9796 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9797
9798 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9799 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9800 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9801 IEM_MC_FETCH_EFLAGS(EFlags);
9802 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9803
9804 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
9805 IEM_MC_COMMIT_EFLAGS(EFlags);
9806 IEM_MC_ADVANCE_RIP();
9807 IEM_MC_END();
9808 }
9809 return VINF_SUCCESS;
9810}
9811
9812
/** Opcode 0xd3. */
FNIEMOP_DEF(iemOp_Grp2_Ev_CL)
{
    /*
     * Group 2 rotate/shift of Ev by the count in CL.  The /reg field of the
     * modrm byte selects the operation; /6 is an undefined encoding (#UD).
     */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,CL"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,CL"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,CL"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,CL"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,CL"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,CL"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,CL"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
    }
    /* OF and AF are left undefined by the hardware for these operations. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        IEMOP_HLP_NO_LOCK_PREFIX();
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *,      pu16Dst,    0);
                IEM_MC_ARG(uint8_t,         cShiftArg,  1);
                IEM_MC_ARG(uint32_t *,      pEFlags,    2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *,      pu32Dst,    0);
                IEM_MC_ARG(uint8_t,         cShiftArg,  1);
                IEM_MC_ARG(uint32_t *,      pEFlags,    2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *,      pu64Dst,    0);
                IEM_MC_ARG(uint8_t,         cShiftArg,  1);
                IEM_MC_ARG(uint32_t *,      pEFlags,    2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *,      pu16Dst,    0);
                IEM_MC_ARG(uint8_t,         cShiftArg,  1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *,      pu32Dst,    0);
                IEM_MC_ARG(uint8_t,         cShiftArg,  1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *,      pu64Dst,    0);
                IEM_MC_ARG(uint8_t,         cShiftArg,  1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
9947
/** Opcode 0xd4. */
FNIEMOP_DEF(iemOp_aam_Ib)
{
    IEMOP_MNEMONIC("aam Ib");
    uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT(); /* aam is invalid in 64-bit mode */
    /* AAM divides AL by the immediate, so a zero immediate raises \#DE. */
    if (!bImm)
        return IEMOP_RAISE_DIVIDE_ERROR();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_aam, bImm);
}
9959
9960
/** Opcode 0xd5. */
FNIEMOP_DEF(iemOp_aad_Ib)
{
    IEMOP_MNEMONIC("aad Ib");
    uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT(); /* aad is invalid in 64-bit mode */
    /* AAD multiplies, so unlike AAM a zero immediate is fine. */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_aad, bImm);
}
9970
9971
/** Opcode 0xd7. */
FNIEMOP_DEF(iemOp_xlat)
{
    /*
     * xlat: AL = [(e/r)BX + zero-extended AL], using the effective segment.
     * The table base width follows the effective address mode.
     */
    IEMOP_MNEMONIC("xlat");
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t,  u8Tmp);
            IEM_MC_LOCAL(uint16_t, u16Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U16(u16Addr, X86_GREG_xAX);
            IEM_MC_ADD_GREG_U16_TO_LOCAL(u16Addr, X86_GREG_xBX);
            IEM_MC_FETCH_MEM16_U8(u8Tmp, pIemCpu->iEffSeg, u16Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t,  u8Tmp);
            IEM_MC_LOCAL(uint32_t, u32Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U32(u32Addr, X86_GREG_xAX);
            IEM_MC_ADD_GREG_U32_TO_LOCAL(u32Addr, X86_GREG_xBX);
            IEM_MC_FETCH_MEM32_U8(u8Tmp, pIemCpu->iEffSeg, u32Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t,  u8Tmp);
            IEM_MC_LOCAL(uint64_t, u64Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U64(u64Addr, X86_GREG_xAX);
            IEM_MC_ADD_GREG_U64_TO_LOCAL(u64Addr, X86_GREG_xBX);
            IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, u64Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10018
10019
/** Opcode 0xd8. */
FNIEMOP_STUB(iemOp_EscF0);
/** Opcode 0xd9. */
FNIEMOP_STUB(iemOp_EscF1);
/** Opcode 0xda. */
FNIEMOP_STUB(iemOp_EscF2);


/* FPU escape 0xdb memory forms (dispatched from iemOp_EscF3 below). */
/** Opcode 0xdb /0. */
FNIEMOP_STUB_1(iemOp_fild_dw, uint8_t, bRm);
/** Opcode 0xdb /1. */
FNIEMOP_STUB_1(iemOp_fisttp_dw, uint8_t, bRm);
/** Opcode 0xdb /2. */
FNIEMOP_STUB_1(iemOp_fist_dw, uint8_t, bRm);
/** Opcode 0xdb /3. */
FNIEMOP_STUB_1(iemOp_fistp_dw, uint8_t, bRm);
/** Opcode 0xdb /5. */
FNIEMOP_STUB_1(iemOp_fld_xr, uint8_t, bRm);
/** Opcode 0xdb /7. */
FNIEMOP_STUB_1(iemOp_fstp_xr, uint8_t, bRm);
10040
10041
/** Opcode 0xdb 0xe0. */
FNIEMOP_DEF(iemOp_fneni)
{
    /* 8087-only instruction; treated as a no-op on later FPUs. */
    IEMOP_MNEMONIC("fneni (8087/ign)");
    IEM_MC_BEGIN(0,0);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10051
10052
/** Opcode 0xdb 0xe1. */
FNIEMOP_DEF(iemOp_fndisi)
{
    /* 8087-only instruction; treated as a no-op on later FPUs. */
    IEMOP_MNEMONIC("fndisi (8087/ign)");
    IEM_MC_BEGIN(0,0);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10062
10063
/** Opcode 0xdb 0xe2. */
FNIEMOP_STUB(iemOp_fnclex); /* not implemented yet */
10066
10067
/** Opcode 0xdb 0xe3. */
FNIEMOP_DEF(iemOp_fninit)
{
    IEMOP_MNEMONIC("fninit");
    /* fCheckXcpts=false: fninit does not check for pending FPU exceptions (unlike finit). */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_finit, false /*fCheckXcpts*/);
}
10074
10075
/** Opcode 0xdb 0xe4. */
FNIEMOP_DEF(iemOp_fnsetpm)
{
    /* 80287-only instruction; ignored (no-op) on later FPUs. */
    IEMOP_MNEMONIC("fnsetpm (80287/ign)");   /* set protected mode on fpu. */
    IEM_MC_BEGIN(0,0);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10085
10086
/** Opcode 0xdb 0xe5. */
FNIEMOP_DEF(iemOp_frstpm)
{
    /* 80287XL-only instruction; ignored (no-op) on later FPUs. */
    IEMOP_MNEMONIC("frstpm (80287XL/ign)"); /* reset pm, back to real mode. */
    IEM_MC_BEGIN(0,0);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10096
10097
/** Opcode 0xdb. */
FNIEMOP_DEF(iemOp_EscF3)
{
    /*
     * FPU escape 0xdb.  Register forms (mod==3) dispatch on the top five bits
     * of the modrm byte; memory forms dispatch on the /reg field.
     */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (bRm & 0xf8)
        {
            case 0xc0: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fcmovnb
            case 0xc8: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fcmovne
            case 0xd0: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fcmovnbe
            case 0xd8: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fcmovnu
            case 0xe0:
                IEMOP_HLP_NO_LOCK_PREFIX();
                /* 0xdb 0xe0..0xe5 are the legacy no-operand control instructions. */
                switch (bRm)
                {
                    case 0xe0: return FNIEMOP_CALL(iemOp_fneni);
                    case 0xe1: return FNIEMOP_CALL(iemOp_fndisi);
                    case 0xe2: return FNIEMOP_CALL(iemOp_fnclex);
                    case 0xe3: return FNIEMOP_CALL(iemOp_fninit);
                    case 0xe4: return FNIEMOP_CALL(iemOp_fnsetpm);
                    case 0xe5: return FNIEMOP_CALL(iemOp_frstpm);
                    default: return IEMOP_RAISE_INVALID_OPCODE();
                }
                break; /* unreachable - every inner case returns */
            case 0xe8: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fucomi
            case 0xf0: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fcomi
            case 0xf8: return IEMOP_RAISE_INVALID_OPCODE();
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fild_dw,  bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fisttp_dw,bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fist_dw,  bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fistp_dw, bRm);
            case 4: return IEMOP_RAISE_INVALID_OPCODE();
            case 5: return FNIEMOP_CALL_1(iemOp_fld_xr,   bRm);
            case 6: return IEMOP_RAISE_INVALID_OPCODE();
            case 7: return FNIEMOP_CALL_1(iemOp_fstp_xr,  bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
10145
/** Opcode 0xdc. */
FNIEMOP_STUB(iemOp_EscF4);
/** Opcode 0xdd. */
FNIEMOP_STUB(iemOp_EscF5);

/** Opcode 0xde 0xd9. */
FNIEMOP_STUB(iemOp_fcompp); /* dispatched from iemOp_EscF6 */
10153
/** Opcode 0xde. */
FNIEMOP_DEF(iemOp_EscF6)
{
    /*
     * FPU escape 0xde.  Only fcompp (0xde 0xd9) is implemented; all other
     * register forms and all memory forms still assert as not implemented.
     */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (bRm & 0xf8)
        {
            case 0xc0: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fiaddp
            case 0xc8: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fimulp
            case 0xd0: return IEMOP_RAISE_INVALID_OPCODE();
            case 0xd8:
                /* Only the 0xd9 encoding (fcompp) is valid in this row. */
                switch (bRm)
                {
                    case 0xd9: return FNIEMOP_CALL(iemOp_fcompp);
                    default:   return IEMOP_RAISE_INVALID_OPCODE();
                }
            case 0xe0: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fsubrp
            case 0xe8: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fsubp
            case 0xf0: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fdivrp
            case 0xf8: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fdivp
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory forms: planned dispatch kept for reference, not wired up yet. */
#if 0
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fiadd_w,  bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fimul_w,  bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_ficom_w,  bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_ficomp_w, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fisub_w,  bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fisubr_w, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fidiv_w,  bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fidivr_w, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
#endif
        AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
    }
}
10197
10198
10199/** Opcode 0xdf 0xe0. */
/**
 * FNSTSW AX - store the x87 FPU status word into AX.
 *
 * No-wait form: fetches FSW and writes it to AX without checking for
 * pending FPU exceptions first (only CR0.EM/TS via the device-not-available
 * check).
 */
10200FNIEMOP_DEF(iemOp_fnstsw_ax)
10201{
10202    IEMOP_MNEMONIC("fnstsw ax");
10203    IEMOP_HLP_NO_LOCK_PREFIX();
10204
10205    IEM_MC_BEGIN(0, 1);
10206    IEM_MC_LOCAL(uint16_t, u16Tmp);
     /* Raises #NM if CR0 says the FPU is unavailable. */
10207    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
10208    IEM_MC_FETCH_FSW(u16Tmp);
10209    IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
10210    IEM_MC_ADVANCE_RIP();
10211    IEM_MC_END();
10212    return VINF_SUCCESS;
10213}
10214
10215
10216/** Opcode 0xdf. */
/**
 * Decoder for x87 escape opcode 0xdf.
 *
 * Only FNSTSW AX (0xdf 0xe0) is implemented; FUCOMIP/FCOMIP assert-fail as
 * not implemented, the remaining register groups are invalid, and all
 * memory forms are not implemented yet.
 */
10217FNIEMOP_DEF(iemOp_EscF7)
10218{
10219    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10220    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10221    {
10222        switch (bRm & 0xf8)
10223        {
10224            case 0xc0: return IEMOP_RAISE_INVALID_OPCODE();
10225            case 0xc8: return IEMOP_RAISE_INVALID_OPCODE();
10226            case 0xd0: return IEMOP_RAISE_INVALID_OPCODE();
10227            case 0xd8: return IEMOP_RAISE_INVALID_OPCODE();
10228            case 0xe0:
                 /* Only 0xdf 0xe0 (FNSTSW AX) is valid in this group. */
10229                switch (bRm)
10230                {
10231                    case 0xe0: return FNIEMOP_CALL(iemOp_fnstsw_ax);
10232                    default: return IEMOP_RAISE_INVALID_OPCODE();
10233                }
10234            case 0xe8: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fucomip
10235            case 0xf0: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fcomip
10236            case 0xf8: return IEMOP_RAISE_INVALID_OPCODE();
10237            IEM_NOT_REACHED_DEFAULT_CASE_RET();
10238        }
10239    }
10240    else
10241    {
         /* Memory forms (fild/fisttp/fist/fistp/fbld/fbstp/...) not implemented yet. */
10242        AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
10243    }
10244}
10245
10246
10247/** Opcode 0xe0. */
/**
 * LOOPNE/LOOPNZ Jb - decrement xCX (width per effective address size) and
 * take the relative 8-bit jump while xCX != 0 and ZF is clear.
 */
10248FNIEMOP_DEF(iemOp_loopne_Jb)
10249{
10250    IEMOP_MNEMONIC("loopne Jb");
10251    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
10252    IEMOP_HLP_NO_LOCK_PREFIX();
10253    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10254
     /* The address-size prefix selects CX vs. ECX vs. RCX as the counter. */
10255    switch (pIemCpu->enmEffAddrMode)
10256    {
10257        case IEMMODE_16BIT:
10258            IEM_MC_BEGIN(0,0);
10259            IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
10260            IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
10261                IEM_MC_REL_JMP_S8(i8Imm);
10262            } IEM_MC_ELSE() {
10263                IEM_MC_ADVANCE_RIP();
10264            } IEM_MC_ENDIF();
10265            IEM_MC_END();
10266            return VINF_SUCCESS;
10267
10268        case IEMMODE_32BIT:
10269            IEM_MC_BEGIN(0,0);
10270            IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
10271            IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
10272                IEM_MC_REL_JMP_S8(i8Imm);
10273            } IEM_MC_ELSE() {
10274                IEM_MC_ADVANCE_RIP();
10275            } IEM_MC_ENDIF();
10276            IEM_MC_END();
10277            return VINF_SUCCESS;
10278
10279        case IEMMODE_64BIT:
10280            IEM_MC_BEGIN(0,0);
10281            IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
10282            IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
10283                IEM_MC_REL_JMP_S8(i8Imm);
10284            } IEM_MC_ELSE() {
10285                IEM_MC_ADVANCE_RIP();
10286            } IEM_MC_ENDIF();
10287            IEM_MC_END();
10288            return VINF_SUCCESS;
10289
10290        IEM_NOT_REACHED_DEFAULT_CASE_RET();
10291    }
10292}
10293
10294
10295/** Opcode 0xe1. */
/**
 * LOOPE/LOOPZ Jb - decrement xCX (width per effective address size) and
 * take the relative 8-bit jump while xCX != 0 and ZF is set.
 */
10296FNIEMOP_DEF(iemOp_loope_Jb)
10297{
10298    IEMOP_MNEMONIC("loope Jb");
10299    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
10300    IEMOP_HLP_NO_LOCK_PREFIX();
10301    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10302
     /* The address-size prefix selects CX vs. ECX vs. RCX as the counter. */
10303    switch (pIemCpu->enmEffAddrMode)
10304    {
10305        case IEMMODE_16BIT:
10306            IEM_MC_BEGIN(0,0);
10307            IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
10308            IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
10309                IEM_MC_REL_JMP_S8(i8Imm);
10310            } IEM_MC_ELSE() {
10311                IEM_MC_ADVANCE_RIP();
10312            } IEM_MC_ENDIF();
10313            IEM_MC_END();
10314            return VINF_SUCCESS;
10315
10316        case IEMMODE_32BIT:
10317            IEM_MC_BEGIN(0,0);
10318            IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
10319            IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
10320                IEM_MC_REL_JMP_S8(i8Imm);
10321            } IEM_MC_ELSE() {
10322                IEM_MC_ADVANCE_RIP();
10323            } IEM_MC_ENDIF();
10324            IEM_MC_END();
10325            return VINF_SUCCESS;
10326
10327        case IEMMODE_64BIT:
10328            IEM_MC_BEGIN(0,0);
10329            IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
10330            IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
10331                IEM_MC_REL_JMP_S8(i8Imm);
10332            } IEM_MC_ELSE() {
10333                IEM_MC_ADVANCE_RIP();
10334            } IEM_MC_ENDIF();
10335            IEM_MC_END();
10336            return VINF_SUCCESS;
10337
10338        IEM_NOT_REACHED_DEFAULT_CASE_RET();
10339    }
10340}
10341
10342
10343/** Opcode 0xe2. */
/**
 * LOOP Jb - decrement xCX (width per effective address size) and take the
 * relative 8-bit jump while xCX != 0; EFLAGS are not consulted or changed.
 */
10344FNIEMOP_DEF(iemOp_loop_Jb)
10345{
10346    IEMOP_MNEMONIC("loop Jb");
10347    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
10348    IEMOP_HLP_NO_LOCK_PREFIX();
10349    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10350
10351    /** @todo Check out the #GP case if EIP < CS.Base or EIP > CS.Limit when
10352     * using the 32-bit operand size override.  How can that be restarted?  See
10353     * weird pseudo code in intel manual. */
10354    switch (pIemCpu->enmEffAddrMode)
10355    {
10356        case IEMMODE_16BIT:
10357            IEM_MC_BEGIN(0,0);
10358            IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
10359            IEM_MC_IF_CX_IS_NZ() {
10360                IEM_MC_REL_JMP_S8(i8Imm);
10361            } IEM_MC_ELSE() {
10362                IEM_MC_ADVANCE_RIP();
10363            } IEM_MC_ENDIF();
10364            IEM_MC_END();
10365            return VINF_SUCCESS;
10366
10367        case IEMMODE_32BIT:
10368            IEM_MC_BEGIN(0,0);
10369            IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
10370            IEM_MC_IF_ECX_IS_NZ() {
10371                IEM_MC_REL_JMP_S8(i8Imm);
10372            } IEM_MC_ELSE() {
10373                IEM_MC_ADVANCE_RIP();
10374            } IEM_MC_ENDIF();
10375            IEM_MC_END();
10376            return VINF_SUCCESS;
10377
10378        case IEMMODE_64BIT:
10379            IEM_MC_BEGIN(0,0);
10380            IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
10381            IEM_MC_IF_RCX_IS_NZ() {
10382                IEM_MC_REL_JMP_S8(i8Imm);
10383            } IEM_MC_ELSE() {
10384                IEM_MC_ADVANCE_RIP();
10385            } IEM_MC_ENDIF();
10386            IEM_MC_END();
10387            return VINF_SUCCESS;
10388
10389        IEM_NOT_REACHED_DEFAULT_CASE_RET();
10390    }
10391}
10392
10393
10394/** Opcode 0xe3. */
/**
 * JCXZ/JECXZ/JRCXZ Jb - jump if the counter register (width per effective
 * address size) is zero.  Note the inverted IF bodies: the "counter not
 * zero" branch advances RIP and the ELSE branch takes the jump.
 */
10395FNIEMOP_DEF(iemOp_jecxz_Jb)
10396{
10397    IEMOP_MNEMONIC("jecxz Jb");
10398    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
10399    IEMOP_HLP_NO_LOCK_PREFIX();
10400    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10401
10402    switch (pIemCpu->enmEffAddrMode)
10403    {
10404        case IEMMODE_16BIT:
10405            IEM_MC_BEGIN(0,0);
10406            IEM_MC_IF_CX_IS_NZ() {
10407                IEM_MC_ADVANCE_RIP();
10408            } IEM_MC_ELSE() {
10409                IEM_MC_REL_JMP_S8(i8Imm);
10410            } IEM_MC_ENDIF();
10411            IEM_MC_END();
10412            return VINF_SUCCESS;
10413
10414        case IEMMODE_32BIT:
10415            IEM_MC_BEGIN(0,0);
10416            IEM_MC_IF_ECX_IS_NZ() {
10417                IEM_MC_ADVANCE_RIP();
10418            } IEM_MC_ELSE() {
10419                IEM_MC_REL_JMP_S8(i8Imm);
10420            } IEM_MC_ENDIF();
10421            IEM_MC_END();
10422            return VINF_SUCCESS;
10423
10424        case IEMMODE_64BIT:
10425            IEM_MC_BEGIN(0,0);
10426            IEM_MC_IF_RCX_IS_NZ() {
10427                IEM_MC_ADVANCE_RIP();
10428            } IEM_MC_ELSE() {
10429                IEM_MC_REL_JMP_S8(i8Imm);
10430            } IEM_MC_ENDIF();
10431            IEM_MC_END();
10432            return VINF_SUCCESS;
10433
10434        IEM_NOT_REACHED_DEFAULT_CASE_RET();
10435    }
10436}
10437
10438
10439/** Opcode 0xe4 */
/**
 * IN AL, imm8 - read one byte from the immediate I/O port into AL.
 * Defers to the iemCImpl_in C implementation with cbReg = 1.
 */
10440FNIEMOP_DEF(iemOp_in_AL_Ib)
10441{
    /* Fixed: the mnemonic previously read "in eAX,Ib", but this is the
       byte-sized form (destination AL, register width 1) — cf. the
       correctly named sibling iemOp_out_Ib_AL. */
10442    IEMOP_MNEMONIC("in AL,Ib");
10443    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10444    IEMOP_HLP_NO_LOCK_PREFIX();
10445    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, 1);
10446}
10447
10448
10449/** Opcode 0xe5 */
/**
 * IN eAX, imm8 - read a word/dword from the immediate I/O port into AX/EAX.
 * Register width follows the effective operand size (2 or 4; 64-bit IN
 * still uses a 4-byte access).
 */
10450FNIEMOP_DEF(iemOp_in_eAX_Ib)
10451{
10452    IEMOP_MNEMONIC("in eAX,Ib");
10453    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10454    IEMOP_HLP_NO_LOCK_PREFIX();
10455    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
10456}
10457
10458
10459/** Opcode 0xe6 */
/**
 * OUT imm8, AL - write AL to the immediate I/O port.
 * Defers to the iemCImpl_out C implementation with cbReg = 1.
 */
10460FNIEMOP_DEF(iemOp_out_Ib_AL)
10461{
10462    IEMOP_MNEMONIC("out Ib,AL");
10463    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10464    IEMOP_HLP_NO_LOCK_PREFIX();
10465    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, 1);
10466}
10467
10468
10469/** Opcode 0xe7 */
/**
 * OUT imm8, eAX - write AX/EAX to the immediate I/O port.
 * Register width follows the effective operand size (2 or 4).
 */
10470FNIEMOP_DEF(iemOp_out_Ib_eAX)
10471{
10472    IEMOP_MNEMONIC("out Ib,eAX");
10473    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10474    IEMOP_HLP_NO_LOCK_PREFIX();
10475    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
10476}
10477
10478
10479/** Opcode 0xe8. */
/**
 * CALL rel16/rel32 - near relative call; the displacement is sign-extended
 * and handed to the operand-size specific C implementation.  In 64-bit
 * mode the operand size defaults to 64 bits and the rel32 immediate is
 * sign-extended to 64 bits.
 */
10480FNIEMOP_DEF(iemOp_call_Jv)
10481{
10482    IEMOP_MNEMONIC("call Jv");
10483    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10484    switch (pIemCpu->enmEffOpSize)
10485    {
10486        case IEMMODE_16BIT:
10487        {
10488            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
10489            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_16, (int16_t)u16Imm);
10490        }
10491
10492        case IEMMODE_32BIT:
10493        {
10494            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
10495            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_32, (int32_t)u32Imm);
10496        }
10497
10498        case IEMMODE_64BIT:
10499        {
             /* Fetches 4 bytes and sign-extends to 64 bits (no rel64 encoding exists). */
10500            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
10501            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_64, u64Imm);
10502        }
10503
10504        IEM_NOT_REACHED_DEFAULT_CASE_RET();
10505    }
10506}
10507
10508
10509/** Opcode 0xe9. */
/**
 * JMP rel16/rel32 - near relative jump.  The 64-bit and 32-bit cases share
 * one path since both encode a rel32 displacement; IEM_MC_REL_JMP_S32
 * applies it relative to the instruction end.
 */
10510FNIEMOP_DEF(iemOp_jmp_Jv)
10511{
10512    IEMOP_MNEMONIC("jmp Jv");
10513    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10514    switch (pIemCpu->enmEffOpSize)
10515    {
10516        case IEMMODE_16BIT:
10517        {
10518            int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
10519            IEM_MC_BEGIN(0, 0);
10520            IEM_MC_REL_JMP_S16(i16Imm);
10521            IEM_MC_END();
10522            return VINF_SUCCESS;
10523        }
10524
         /* 64-bit mode also uses the rel32 encoding (sign-extended). */
10525        case IEMMODE_64BIT:
10526        case IEMMODE_32BIT:
10527        {
10528            int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
10529            IEM_MC_BEGIN(0, 0);
10530            IEM_MC_REL_JMP_S32(i32Imm);
10531            IEM_MC_END();
10532            return VINF_SUCCESS;
10533        }
10534
10535        IEM_NOT_REACHED_DEFAULT_CASE_RET();
10536    }
10537}
10538
10539
10540/** Opcode 0xea. */
/**
 * JMP ptr16:16 / ptr16:32 - direct far jump.  Invalid in 64-bit mode
 * (IEMOP_HLP_NO_64BIT).  Decodes offset then selector and defers to
 * iemCImpl_FarJmp.
 */
10541FNIEMOP_DEF(iemOp_jmp_Ap)
10542{
10543    IEMOP_MNEMONIC("jmp Ap");
10544    IEMOP_HLP_NO_64BIT();
10545
10546    /* Decode the far pointer address and pass it on to the far call C implementation. */
10547    uint32_t offSeg;
10548    if (pIemCpu->enmEffOpSize != IEMMODE_16BIT)
10549        IEM_OPCODE_GET_NEXT_U32(&offSeg);
10550    else
10551        IEM_OPCODE_GET_NEXT_U16_ZX_U32(&offSeg);
10552    uint16_t uSel;  IEM_OPCODE_GET_NEXT_U16(&uSel);
10553    IEMOP_HLP_NO_LOCK_PREFIX();
10554    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_FarJmp, uSel, offSeg, pIemCpu->enmEffOpSize);
10555}
10556
10557
10558/** Opcode 0xeb. */
/**
 * JMP rel8 - short relative jump; unconditional, so no EFLAGS involved.
 */
10559FNIEMOP_DEF(iemOp_jmp_Jb)
10560{
10561    IEMOP_MNEMONIC("jmp Jb");
10562    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
10563    IEMOP_HLP_NO_LOCK_PREFIX();
10564    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10565
10566    IEM_MC_BEGIN(0, 0);
10567    IEM_MC_REL_JMP_S8(i8Imm);
10568    IEM_MC_END();
10569    return VINF_SUCCESS;
10570}
10571
10572
10573/** Opcode 0xec */
/**
 * IN AL, DX - read one byte from the I/O port in DX into AL (cbReg = 1).
 */
10574FNIEMOP_DEF(iemOp_in_AL_DX)
10575{
10576    IEMOP_MNEMONIC("in  AL,DX");
10577    IEMOP_HLP_NO_LOCK_PREFIX();
10578    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, 1);
10579}
10580
10581
10582/** Opcode 0xed */
/**
 * IN eAX, DX - read a word/dword from the I/O port in DX into AX/EAX.
 * NOTE(review): function is named iemOp_eAX_DX; siblings suggest it should
 * be iemOp_in_eAX_DX — cannot rename here, the opcode table references it.
 */
10583FNIEMOP_DEF(iemOp_eAX_DX)
10584{
10585    IEMOP_MNEMONIC("in  eAX,DX");
10586    IEMOP_HLP_NO_LOCK_PREFIX();
10587    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
10588}
10589
10590
10591/** Opcode 0xee */
/**
 * OUT DX, AL - write AL to the I/O port in DX (cbReg = 1).
 */
10592FNIEMOP_DEF(iemOp_out_DX_AL)
10593{
10594    IEMOP_MNEMONIC("out DX,AL");
10595    IEMOP_HLP_NO_LOCK_PREFIX();
10596    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, 1);
10597}
10598
10599
10600/** Opcode 0xef */
/**
 * OUT DX, eAX - write AX/EAX to the I/O port in DX; register width follows
 * the effective operand size (2 or 4).
 */
10601FNIEMOP_DEF(iemOp_out_DX_eAX)
10602{
10603    IEMOP_MNEMONIC("out DX,eAX");
10604    IEMOP_HLP_NO_LOCK_PREFIX();
10605    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
10606}
10607
10608
10609/** Opcode 0xf0. */
/**
 * LOCK prefix - record the prefix and recurse into the one-byte opcode
 * table for the real instruction.  Legality of the prefix is checked by
 * the individual instruction decoders (IEMOP_HLP_NO_LOCK_PREFIX).
 */
10610FNIEMOP_DEF(iemOp_lock)
10611{
10612    pIemCpu->fPrefixes |= IEM_OP_PRF_LOCK;
10613
10614    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10615    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
10616}
10617
10618
10619/** Opcode 0xf2. */
/**
 * REPNE/REPNZ prefix - record the prefix (clearing any earlier REPE) and
 * recurse into the one-byte opcode table for the real instruction.
 */
10620FNIEMOP_DEF(iemOp_repne)
10621{
10622    /* This overrides any previous REPE prefix. */
10623    pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPZ;
10624    pIemCpu->fPrefixes |= IEM_OP_PRF_REPNZ;
10625
10626    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10627    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
10628}
10629
10630
10630/** Opcode 0xf3. */
/**
 * REP/REPE/REPZ prefix - record the prefix (clearing any earlier REPNE) and
 * recurse into the one-byte opcode table for the real instruction.
 */
10631FNIEMOP_DEF(iemOp_repe)
10632{
10633    /* This overrides any previous REPNE prefix. */
10634    pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPNZ;
10635    pIemCpu->fPrefixes |= IEM_OP_PRF_REPZ;
10636
10637    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10638    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
10639}
10641
10642
10642/** Opcode 0xf4. */
/**
 * HLT - defer entirely to the C implementation (privilege check and halt
 * state handling live there).
 * NOTE(review): unlike its siblings this decoder has no IEMOP_MNEMONIC()
 * call — presumably an omission; confirm whether one should be added.
 */
10643FNIEMOP_DEF(iemOp_hlt)
10644{
10645    IEMOP_HLP_NO_LOCK_PREFIX();
10646    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
10647}
10649
10650
10651/** Opcode 0xf5. */
/**
 * CMC - complement the carry flag; no other flags are touched.
 */
10652FNIEMOP_DEF(iemOp_cmc)
10653{
10654    IEMOP_MNEMONIC("cmc");
10655    IEMOP_HLP_NO_LOCK_PREFIX();
10656    IEM_MC_BEGIN(0, 0);
10657    IEM_MC_FLIP_EFL_BIT(X86_EFL_CF);
10658    IEM_MC_ADVANCE_RIP();
10659    IEM_MC_END();
10660    return VINF_SUCCESS;
10661}
10662
10663
10664/**
10665 * Common implementation of 'inc/dec/not/neg Eb'.
10666 *
10667 * @param   bRm     The RM byte.
10668 * @param   pImpl   The instruction implementation.
10669 */
10670FNIEMOP_DEF_2(iemOpCommonUnaryEb, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
10671{
10672    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10673    {
10674        /* register access */
10675        IEM_MC_BEGIN(2, 0);
10676        IEM_MC_ARG(uint8_t *,       pu8Dst, 0);
10677        IEM_MC_ARG(uint32_t *,      pEFlags, 1);
         /* uRexB folds the REX.B extension into the r/m register index. */
10678        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10679        IEM_MC_REF_EFLAGS(pEFlags);
10680        IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
10681        IEM_MC_ADVANCE_RIP();
10682        IEM_MC_END();
10683    }
10684    else
10685    {
10686        /* memory access. */
10687        IEM_MC_BEGIN(2, 2);
10688        IEM_MC_ARG(uint8_t *,       pu8Dst,          0);
10689        IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 1);
10690        IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
10691
10692        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
         /* Map the destination byte read/write; LOCK selects the atomic worker. */
10693        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10694        IEM_MC_FETCH_EFLAGS(EFlags);
10695        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
10696            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
10697        else
10698            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU8, pu8Dst, pEFlags);
10699
10700        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
10701        IEM_MC_COMMIT_EFLAGS(EFlags);
10702        IEM_MC_ADVANCE_RIP();
10703        IEM_MC_END();
10704    }
10705    return VINF_SUCCESS;
10706}
10707
10708
10709/**
10710 * Common implementation of 'inc/dec/not/neg Ev'.
10711 *
10712 * @param   bRm     The RM byte.
10713 * @param   pImpl   The instruction implementation.
10714 */
10715FNIEMOP_DEF_2(iemOpCommonUnaryEv, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
10716{
10717    /* Registers are handled by a common worker. */
10718    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10719        return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, pImpl, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10720
10721    /* Memory we do here. */
     /* Same shape for all three widths: map RW, fetch EFLAGS, run the normal
        or (with LOCK) atomic worker, then commit memory and flags. */
10722    switch (pIemCpu->enmEffOpSize)
10723    {
10724        case IEMMODE_16BIT:
10725            IEM_MC_BEGIN(2, 2);
10726            IEM_MC_ARG(uint16_t *,      pu16Dst,         0);
10727            IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 1);
10728            IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
10729
10730            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10731            IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10732            IEM_MC_FETCH_EFLAGS(EFlags);
10733            if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
10734                IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
10735            else
10736                IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU16, pu16Dst, pEFlags);
10737
10738            IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
10739            IEM_MC_COMMIT_EFLAGS(EFlags);
10740            IEM_MC_ADVANCE_RIP();
10741            IEM_MC_END();
10742            return VINF_SUCCESS;
10743
10744        case IEMMODE_32BIT:
10745            IEM_MC_BEGIN(2, 2);
10746            IEM_MC_ARG(uint32_t *,      pu32Dst,         0);
10747            IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 1);
10748            IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
10749
10750            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10751            IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10752            IEM_MC_FETCH_EFLAGS(EFlags);
10753            if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
10754                IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
10755            else
10756                IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU32, pu32Dst, pEFlags);
10757
10758            IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
10759            IEM_MC_COMMIT_EFLAGS(EFlags);
10760            IEM_MC_ADVANCE_RIP();
10761            IEM_MC_END();
10762            return VINF_SUCCESS;
10763
10764        case IEMMODE_64BIT:
10765            IEM_MC_BEGIN(2, 2);
10766            IEM_MC_ARG(uint64_t *,      pu64Dst,         0);
10767            IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 1);
10768            IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
10769
10770            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10771            IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10772            IEM_MC_FETCH_EFLAGS(EFlags);
10773            if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
10774                IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
10775            else
10776                IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU64, pu64Dst, pEFlags);
10777
10778            IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
10779            IEM_MC_COMMIT_EFLAGS(EFlags);
10780            IEM_MC_ADVANCE_RIP();
10781            IEM_MC_END();
10782            return VINF_SUCCESS;
10783
10784        IEM_NOT_REACHED_DEFAULT_CASE_RET();
10785    }
10786}
10787
10788
10789/** Opcode 0xf6 /0. */
/**
 * TEST Eb, Ib - AND the byte operand with an immediate, set flags, discard
 * the result.  The destination is therefore only mapped read-only in the
 * memory case.
 */
10790FNIEMOP_DEF_1(iemOp_grp3_test_Eb, uint8_t, bRm)
10791{
10792    IEMOP_MNEMONIC("test Eb,Ib");
10793    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
10794
10795    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10796    {
10797        /* register access */
10798        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10799        IEMOP_HLP_NO_LOCK_PREFIX();
10800
10801        IEM_MC_BEGIN(3, 0);
10802        IEM_MC_ARG(uint8_t *,       pu8Dst,             0);
10803        IEM_MC_ARG_CONST(uint8_t,   u8Src,/*=*/u8Imm,   1);
10804        IEM_MC_ARG(uint32_t *,      pEFlags,            2);
10805        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10806        IEM_MC_REF_EFLAGS(pEFlags);
10807        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);
10808        IEM_MC_ADVANCE_RIP();
10809        IEM_MC_END();
10810    }
10811    else
10812    {
10813        /* memory access. */
10814        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10815
10816        IEM_MC_BEGIN(3, 2);
10817        IEM_MC_ARG(uint8_t *,       pu8Dst,          0);
10818        IEM_MC_ARG(uint8_t,         u8Src,           1);
10819        IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
10820        IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
10821
         /* The immediate comes after the ModR/M bytes, so decode the
            effective address first, then fetch the immediate. */
10822        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10823        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10824        IEM_MC_ASSIGN(u8Src, u8Imm);
10825        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10826        IEM_MC_FETCH_EFLAGS(EFlags);
10827        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);
10828
10829        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_R);
10830        IEM_MC_COMMIT_EFLAGS(EFlags);
10831        IEM_MC_ADVANCE_RIP();
10832        IEM_MC_END();
10833    }
10834    return VINF_SUCCESS;
10835}
10836
10837
10838/** Opcode 0xf7 /0. */
/**
 * TEST Ev, Iv - AND the word/dword/qword operand with an immediate, set
 * flags, discard the result (memory operand mapped read-only).  The 64-bit
 * immediate is a sign-extended imm32, matching the instruction encoding.
 */
10839FNIEMOP_DEF_1(iemOp_grp3_test_Ev, uint8_t, bRm)
10840{
10841    IEMOP_MNEMONIC("test Ev,Iv");
10842    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10843    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
10844
10845    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10846    {
10847        /* register access */
10848        switch (pIemCpu->enmEffOpSize)
10849        {
10850            case IEMMODE_16BIT:
10851            {
10852                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
10853                IEM_MC_BEGIN(3, 0);
10854                IEM_MC_ARG(uint16_t *,      pu16Dst,                0);
10855                IEM_MC_ARG_CONST(uint16_t,  u16Src,/*=*/u16Imm,     1);
10856                IEM_MC_ARG(uint32_t *,      pEFlags,                2);
10857                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10858                IEM_MC_REF_EFLAGS(pEFlags);
10859                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);
10860                IEM_MC_ADVANCE_RIP();
10861                IEM_MC_END();
10862                return VINF_SUCCESS;
10863            }
10864
10865            case IEMMODE_32BIT:
10866            {
10867                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
10868                IEM_MC_BEGIN(3, 0);
10869                IEM_MC_ARG(uint32_t *,      pu32Dst,                0);
10870                IEM_MC_ARG_CONST(uint32_t,  u32Src,/*=*/u32Imm,     1);
10871                IEM_MC_ARG(uint32_t *,      pEFlags,                2);
10872                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10873                IEM_MC_REF_EFLAGS(pEFlags);
10874                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);
10875                IEM_MC_ADVANCE_RIP();
10876                IEM_MC_END();
10877                return VINF_SUCCESS;
10878            }
10879
10880            case IEMMODE_64BIT:
10881            {
                 /* imm32 sign-extended to 64 bits - there is no imm64 form. */
10882                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
10883                IEM_MC_BEGIN(3, 0);
10884                IEM_MC_ARG(uint64_t *,      pu64Dst,                0);
10885                IEM_MC_ARG_CONST(uint64_t,  u64Src,/*=*/u64Imm,     1);
10886                IEM_MC_ARG(uint32_t *,      pEFlags,                2);
10887                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10888                IEM_MC_REF_EFLAGS(pEFlags);
10889                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);
10890                IEM_MC_ADVANCE_RIP();
10891                IEM_MC_END();
10892                return VINF_SUCCESS;
10893            }
10894
10895            IEM_NOT_REACHED_DEFAULT_CASE_RET();
10896        }
10897    }
10898    else
10899    {
10900        /* memory access. */
         /* Effective address first, immediate second — it follows ModR/M in
            the instruction stream.  Destination is mapped read-only since
            TEST never writes it. */
10901        switch (pIemCpu->enmEffOpSize)
10902        {
10903            case IEMMODE_16BIT:
10904            {
10905                IEM_MC_BEGIN(3, 2);
10906                IEM_MC_ARG(uint16_t *,      pu16Dst,         0);
10907                IEM_MC_ARG(uint16_t,        u16Src,          1);
10908                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
10909                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
10910
10911                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10912                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
10913                IEM_MC_ASSIGN(u16Src, u16Imm);
10914                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10915                IEM_MC_FETCH_EFLAGS(EFlags);
10916                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);
10917
10918                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_R);
10919                IEM_MC_COMMIT_EFLAGS(EFlags);
10920                IEM_MC_ADVANCE_RIP();
10921                IEM_MC_END();
10922                return VINF_SUCCESS;
10923            }
10924
10925            case IEMMODE_32BIT:
10926            {
10927                IEM_MC_BEGIN(3, 2);
10928                IEM_MC_ARG(uint32_t *,      pu32Dst,         0);
10929                IEM_MC_ARG(uint32_t,        u32Src,          1);
10930                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
10931                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
10932
10933                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10934                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
10935                IEM_MC_ASSIGN(u32Src, u32Imm);
10936                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10937                IEM_MC_FETCH_EFLAGS(EFlags);
10938                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);
10939
10940                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_R);
10941                IEM_MC_COMMIT_EFLAGS(EFlags);
10942                IEM_MC_ADVANCE_RIP();
10943                IEM_MC_END();
10944                return VINF_SUCCESS;
10945            }
10946
10947            case IEMMODE_64BIT:
10948            {
10949                IEM_MC_BEGIN(3, 2);
10950                IEM_MC_ARG(uint64_t *,      pu64Dst,         0);
10951                IEM_MC_ARG(uint64_t,        u64Src,          1);
10952                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
10953                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
10954
10955                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10956                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
10957                IEM_MC_ASSIGN(u64Src, u64Imm);
10958                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10959                IEM_MC_FETCH_EFLAGS(EFlags);
10960                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);
10961
10962                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_R);
10963                IEM_MC_COMMIT_EFLAGS(EFlags);
10964                IEM_MC_ADVANCE_RIP();
10965                IEM_MC_END();
10966                return VINF_SUCCESS;
10967            }
10968
10969            IEM_NOT_REACHED_DEFAULT_CASE_RET();
10970        }
10971    }
10972}
10973
10974
10975/** Opcode 0xf6 /4, /5, /6 and /7. */
/**
 * Common worker for the byte-sized MUL/IMUL/DIV/IDIV forms (0xf6 /4../7).
 *
 * The operand byte comes from the r/m register or memory; the assembly
 * worker operates on AX (implicit accumulator for the 8-bit forms).
 *
 * @param   bRm     The RM byte.
 * @param   pfnU8   The 8-bit assembly implementation to invoke.
 */
FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEb, uint8_t, bRm, PFNIEMAIMPLMULDIVU8, pfnU8)
{
    /* One check up front covers both branches; the per-branch repeats were
       redundant. */
10978    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

10980    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
10984        IEM_MC_BEGIN(3, 0);
10985        IEM_MC_ARG(uint16_t *,      pu16AX,  0);
10986        IEM_MC_ARG(uint8_t,         u8Value, 1);
10987        IEM_MC_ARG(uint32_t *,      pEFlags, 2);
10988        IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10989        IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
10990        IEM_MC_REF_EFLAGS(pEFlags);
10991        IEM_MC_CALL_VOID_AIMPL_3(pfnU8, pu16AX, u8Value, pEFlags);
10992        IEM_MC_ADVANCE_RIP();
10993        IEM_MC_END();
    }
    else
    {
        /* memory access. */
11000        IEM_MC_BEGIN(3, 1);
11001        IEM_MC_ARG(uint16_t *,      pu16AX,  0);
11002        IEM_MC_ARG(uint8_t,         u8Value, 1);
11003        IEM_MC_ARG(uint32_t *,      pEFlags, 2);
11004        IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

11006        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
11007        IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
11008        IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
11009        IEM_MC_REF_EFLAGS(pEFlags);
11010        IEM_MC_CALL_VOID_AIMPL_3(pfnU8, pu16AX, u8Value, pEFlags);

11012        IEM_MC_ADVANCE_RIP();
11013        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
11017
11018
11019/** Opcode 0xf7 /4, /5, /6 and /7. */
/**
 * Common worker for the word/dword/qword MUL/IMUL/DIV/IDIV forms.
 *
 * The assembly workers return non-zero on divide error, which is turned
 * into a \#DE via IEM_MC_RAISE_DIVIDE_ERROR; RIP only advances on success.
 * NOTE(review): IEMOP_HLP_NO_LOCK_PREFIX() is repeated per case although
 * the call at the top already covers all paths — presumably redundant.
 *
 * @param   bRm     The RM byte.
 * @param   pImpl   Table with the 16/32/64-bit assembly implementations.
 */
11020FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEv, uint8_t, bRm, PCIEMOPMULDIVSIZES, pImpl)
11021{
11022    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
11023    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
11024
11025    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11026    {
11027        /* register access */
11028        switch (pIemCpu->enmEffOpSize)
11029        {
11030            case IEMMODE_16BIT:
11031            {
11032                IEMOP_HLP_NO_LOCK_PREFIX();
11033                IEM_MC_BEGIN(4, 1);
11034                IEM_MC_ARG(uint16_t *,      pu16AX,     0);
11035                IEM_MC_ARG(uint16_t *,      pu16DX,     1);
11036                IEM_MC_ARG(uint16_t,        u16Value,   2);
11037                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
11038                IEM_MC_LOCAL(int32_t,       rc);
11039
11040                IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11041                IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
11042                IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
11043                IEM_MC_REF_EFLAGS(pEFlags);
11044                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
                 /* rc == 0: success; otherwise raise #DE without advancing RIP. */
11045                IEM_MC_IF_LOCAL_IS_Z(rc) {
11046                    IEM_MC_ADVANCE_RIP();
11047                } IEM_MC_ELSE() {
11048                    IEM_MC_RAISE_DIVIDE_ERROR();
11049                } IEM_MC_ENDIF();
11050
11051                IEM_MC_END();
11052                return VINF_SUCCESS;
11053            }
11054
11055            case IEMMODE_32BIT:
11056            {
11057                IEMOP_HLP_NO_LOCK_PREFIX();
11058                IEM_MC_BEGIN(4, 1);
11059                IEM_MC_ARG(uint32_t *,      pu32AX,     0);
11060                IEM_MC_ARG(uint32_t *,      pu32DX,     1);
11061                IEM_MC_ARG(uint32_t,        u32Value,   2);
11062                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
11063                IEM_MC_LOCAL(int32_t,       rc);
11064
11065                IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11066                IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
11067                IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
11068                IEM_MC_REF_EFLAGS(pEFlags);
11069                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
11070                IEM_MC_IF_LOCAL_IS_Z(rc) {
11071                    IEM_MC_ADVANCE_RIP();
11072                } IEM_MC_ELSE() {
11073                    IEM_MC_RAISE_DIVIDE_ERROR();
11074                } IEM_MC_ENDIF();
11075
11076                IEM_MC_END();
11077                return VINF_SUCCESS;
11078            }
11079
11080            case IEMMODE_64BIT:
11081            {
11082                IEMOP_HLP_NO_LOCK_PREFIX();
11083                IEM_MC_BEGIN(4, 1);
11084                IEM_MC_ARG(uint64_t *,      pu64AX,     0);
11085                IEM_MC_ARG(uint64_t *,      pu64DX,     1);
11086                IEM_MC_ARG(uint64_t,        u64Value,   2);
11087                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
11088                IEM_MC_LOCAL(int32_t,       rc);
11089
11090                IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11091                IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
11092                IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
11093                IEM_MC_REF_EFLAGS(pEFlags);
11094                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
11095                IEM_MC_IF_LOCAL_IS_Z(rc) {
11096                    IEM_MC_ADVANCE_RIP();
11097                } IEM_MC_ELSE() {
11098                    IEM_MC_RAISE_DIVIDE_ERROR();
11099                } IEM_MC_ENDIF();
11100
11101                IEM_MC_END();
11102                return VINF_SUCCESS;
11103            }
11104
11105            IEM_NOT_REACHED_DEFAULT_CASE_RET();
11106        }
11107    }
11108    else
11109    {
11110        /* memory access. */
11111        switch (pIemCpu->enmEffOpSize)
11112        {
11113            case IEMMODE_16BIT:
11114            {
11115                IEMOP_HLP_NO_LOCK_PREFIX();
11116                IEM_MC_BEGIN(4, 2);
11117                IEM_MC_ARG(uint16_t *,      pu16AX,     0);
11118                IEM_MC_ARG(uint16_t *,      pu16DX,     1);
11119                IEM_MC_ARG(uint16_t,        u16Value,   2);
11120                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
11121                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
11122                IEM_MC_LOCAL(int32_t,       rc);
11123
11124                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
11125                IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
11126                IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
11127                IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
11128                IEM_MC_REF_EFLAGS(pEFlags);
11129                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
11130                IEM_MC_IF_LOCAL_IS_Z(rc) {
11131                    IEM_MC_ADVANCE_RIP();
11132                } IEM_MC_ELSE() {
11133                    IEM_MC_RAISE_DIVIDE_ERROR();
11134                } IEM_MC_ENDIF();
11135
11136                IEM_MC_END();
11137                return VINF_SUCCESS;
11138            }
11139
11140            case IEMMODE_32BIT:
11141            {
11142                IEMOP_HLP_NO_LOCK_PREFIX();
11143                IEM_MC_BEGIN(4, 2);
11144                IEM_MC_ARG(uint32_t *,      pu32AX,     0);
11145                IEM_MC_ARG(uint32_t *,      pu32DX,     1);
11146                IEM_MC_ARG(uint32_t,        u32Value,   2);
11147                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
11148                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
11149                IEM_MC_LOCAL(int32_t,       rc);
11150
11151                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
11152                IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
11153                IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
11154                IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
11155                IEM_MC_REF_EFLAGS(pEFlags);
11156                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
11157                IEM_MC_IF_LOCAL_IS_Z(rc) {
11158                    IEM_MC_ADVANCE_RIP();
11159                } IEM_MC_ELSE() {
11160                    IEM_MC_RAISE_DIVIDE_ERROR();
11161                } IEM_MC_ENDIF();
11162
11163                IEM_MC_END();
11164                return VINF_SUCCESS;
11165            }
11166
11167            case IEMMODE_64BIT:
11168            {
11169                IEMOP_HLP_NO_LOCK_PREFIX();
11170                IEM_MC_BEGIN(4, 2);
11171                IEM_MC_ARG(uint64_t *,      pu64AX,     0);
11172                IEM_MC_ARG(uint64_t *,      pu64DX,     1);
11173                IEM_MC_ARG(uint64_t,        u64Value,   2);
11174                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
11175                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
11176                IEM_MC_LOCAL(int32_t,       rc);
11177
11178                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
11179                IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
11180                IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
11181                IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
11182                IEM_MC_REF_EFLAGS(pEFlags);
11183                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
11184                IEM_MC_IF_LOCAL_IS_Z(rc) {
11185                    IEM_MC_ADVANCE_RIP();
11186                } IEM_MC_ELSE() {
11187                    IEM_MC_RAISE_DIVIDE_ERROR();
11188                } IEM_MC_ENDIF();
11189
11190                IEM_MC_END();
11191                return VINF_SUCCESS;
11192            }
11193
11194            IEM_NOT_REACHED_DEFAULT_CASE_RET();
11195        }
11196    }
11197}
11198
11199/** Opcode 0xf6. */
/**
 * Group 3 dispatcher for byte operands (0xf6) - selects on the ModR/M reg
 * field: /0 TEST, /1 invalid, /2 NOT, /3 NEG, /4 MUL, /5 IMUL, /6 DIV,
 * /7 IDIV.
 */
11200FNIEMOP_DEF(iemOp_Grp3_Eb)
11201{
11202    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
11203    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
11204    {
11205        case 0:
11206            return FNIEMOP_CALL_1(iemOp_grp3_test_Eb, bRm);
11207        case 1:
         /* /1 is undefined; raised as invalid here. NOTE(review): raises
            invalid-lock-prefix rather than invalid-opcode — confirm intent. */
11208            return IEMOP_RAISE_INVALID_LOCK_PREFIX();
11209        case 2:
11210            IEMOP_MNEMONIC("not Eb");
11211            return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_not);
11212        case 3:
11213            IEMOP_MNEMONIC("neg Eb");
11214            return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_neg);
11215        case 4:
11216            IEMOP_MNEMONIC("mul Eb");
11217            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
11218            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_mul_u8);
11219        case 5:
11220            IEMOP_MNEMONIC("imul Eb");
11221            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
11222            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_imul_u8);
11223        case 6:
11224            IEMOP_MNEMONIC("div Eb");
11225            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
11226            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_div_u8);
11227        case 7:
11228            IEMOP_MNEMONIC("idiv Eb");
11229            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
11230            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_idiv_u8);
11231        IEM_NOT_REACHED_DEFAULT_CASE_RET();
11232    }
11233}
11234
11235
/** Opcode 0xf7 - Group 3 with a word/dword/qword operand (Ev). */
FNIEMOP_DEF(iemOp_Grp3_Ev)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* The reg field of the ModR/M byte selects which group-3 instruction this is. */
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            return FNIEMOP_CALL_1(iemOp_grp3_test_Ev, bRm);
        case 1:
            /* /1 has no documented encoding here; NOTE(review): this raises the
               invalid-lock-prefix variant of \#UD -- confirm that is the intent. */
            return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        case 2:
            IEMOP_MNEMONIC("not Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_not);
        case 3:
            IEMOP_MNEMONIC("neg Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_neg);
        case 4:
            IEMOP_MNEMONIC("mul Ev");
            /* SF, ZF, AF and PF are undefined after MUL; tell the verifier. */
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_mul);
        case 5:
            IEMOP_MNEMONIC("imul Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_imul);
        case 6:
            IEMOP_MNEMONIC("div Ev");
            /* All six arithmetic flags are undefined after DIV/IDIV. */
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_div);
        case 7:
            IEMOP_MNEMONIC("idiv Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_idiv);
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
11271
11272
/** Opcode 0xf8 - CLC: clear the carry flag. */
FNIEMOP_DEF(iemOp_clc)
{
    IEMOP_MNEMONIC("clc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_CLEAR_EFL_BIT(X86_EFL_CF);   /* CF <- 0; no other flags are touched. */
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
11284
11285
/** Opcode 0xf9 - STC: set the carry flag. */
FNIEMOP_DEF(iemOp_stc)
{
    IEMOP_MNEMONIC("stc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_SET_EFL_BIT(X86_EFL_CF);     /* CF <- 1; no other flags are touched. */
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
11297
11298
/** Opcode 0xfa - CLI: clear the interrupt flag (privilege checks done in the
 *  C implementation). */
FNIEMOP_DEF(iemOp_cli)
{
    IEMOP_MNEMONIC("cli");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Deferred to the C implementation because of IOPL/VME checking. */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cli);
}
11306
11307
/** Opcode 0xfb - STI: set the interrupt flag (privilege checks done in the
 *  C implementation). */
FNIEMOP_DEF(iemOp_sti)
{
    IEMOP_MNEMONIC("sti");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Deferred to the C implementation because of IOPL/VME checking and the
       interrupt shadow that follows STI. */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sti);
}
11314
11315
/** Opcode 0xfc - CLD: clear the direction flag. */
FNIEMOP_DEF(iemOp_cld)
{
    IEMOP_MNEMONIC("cld");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_CLEAR_EFL_BIT(X86_EFL_DF);   /* DF <- 0: string ops auto-increment. */
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
11327
11328
/** Opcode 0xfd - STD: set the direction flag. */
FNIEMOP_DEF(iemOp_std)
{
    IEMOP_MNEMONIC("std");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_SET_EFL_BIT(X86_EFL_DF);     /* DF <- 1: string ops auto-decrement. */
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
11340
11341
11342/** Opcode 0xfe. */
11343FNIEMOP_DEF(iemOp_Grp4)
11344{
11345 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
11346 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
11347 {
11348 case 0:
11349 IEMOP_MNEMONIC("inc Ev");
11350 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_inc);
11351 case 1:
11352 IEMOP_MNEMONIC("dec Ev");
11353 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_dec);
11354 default:
11355 IEMOP_MNEMONIC("grp4-ud");
11356 return IEMOP_RAISE_INVALID_OPCODE();
11357 }
11358}
11359
11360
/**
 * Opcode 0xff /2 - CALL near, absolute indirect (target in register or memory).
 *
 * @param   bRm     The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_calln_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("calln Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();  /* In 64-bit mode the operand size defaults to 64-bit. */

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* The new RIP is taken from a register. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(1, 0);
                IEM_MC_ARG(uint16_t, u16Target, 0);
                IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(1, 0);
                IEM_MC_ARG(uint32_t, u32Target, 0);
                IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(1, 0);
                IEM_MC_ARG(uint64_t, u64Target, 0);
                IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* The new RIP is taken from memory. (Comment fixed; this is the mod != 3 branch.) */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(1, 1);
                IEM_MC_ARG(uint16_t, u16Target, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
                IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(1, 1);
                IEM_MC_ARG(uint32_t, u32Target, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
                IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(1, 1);
                IEM_MC_ARG(uint64_t, u64Target, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
                IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
11442
11443
11444/**
11445 * Opcode 0xff /3.
11446 * @param bRm The RM byte.
11447 */
11448FNIEMOP_DEF_1(iemOp_Grp5_callf_Ep, uint8_t, bRm)
11449{
11450 IEMOP_MNEMONIC("callf Ep");
11451 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
11452
11453 /* Registers? How?? */
11454 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11455 {
11456 /** @todo How the heck does a 'callf eax' work? Probably just have to
11457 * search the docs... */
11458 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
11459 }
11460
11461 /* Far pointer loaded from memory. */
11462 switch (pIemCpu->enmEffOpSize)
11463 {
11464 case IEMMODE_16BIT:
11465 IEM_MC_BEGIN(3, 1);
11466 IEM_MC_ARG(uint16_t, u16Sel, 0);
11467 IEM_MC_ARG(uint16_t, offSeg, 1);
11468 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
11469 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11470 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11471 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11472 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 2);
11473 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
11474 IEM_MC_END();
11475 return VINF_SUCCESS;
11476
11477 case IEMMODE_32BIT:
11478 IEM_MC_BEGIN(3, 1);
11479 IEM_MC_ARG(uint16_t, u16Sel, 0);
11480 IEM_MC_ARG(uint32_t, offSeg, 1);
11481 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_32BIT, 2);
11482 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11483 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11484 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11485 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 4);
11486 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
11487 IEM_MC_END();
11488 return VINF_SUCCESS;
11489
11490 case IEMMODE_64BIT:
11491 IEM_MC_BEGIN(3, 1);
11492 IEM_MC_ARG(uint16_t, u16Sel, 0);
11493 IEM_MC_ARG(uint64_t, offSeg, 1);
11494 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
11495 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11496 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11497 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11498 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 8);
11499 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
11500 IEM_MC_END();
11501 return VINF_SUCCESS;
11502
11503 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11504 }
11505}
11506
11507
11508/**
11509 * Opcode 0xff /4.
11510 * @param bRm The RM byte.
11511 */
11512FNIEMOP_DEF_1(iemOp_Grp5_jmpn_Ev, uint8_t, bRm)
11513{
11514 IEMOP_MNEMONIC("jmpn Ev");
11515 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
11516 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
11517
11518 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11519 {
11520 /* The new RIP is taken from a register. */
11521 switch (pIemCpu->enmEffOpSize)
11522 {
11523 case IEMMODE_16BIT:
11524 IEM_MC_BEGIN(0, 1);
11525 IEM_MC_LOCAL(uint16_t, u16Target);
11526 IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11527 IEM_MC_SET_RIP_U16(u16Target);
11528 IEM_MC_END()
11529 return VINF_SUCCESS;
11530
11531 case IEMMODE_32BIT:
11532 IEM_MC_BEGIN(0, 1);
11533 IEM_MC_LOCAL(uint32_t, u32Target);
11534 IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11535 IEM_MC_SET_RIP_U32(u32Target);
11536 IEM_MC_END()
11537 return VINF_SUCCESS;
11538
11539 case IEMMODE_64BIT:
11540 IEM_MC_BEGIN(0, 1);
11541 IEM_MC_LOCAL(uint64_t, u64Target);
11542 IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11543 IEM_MC_SET_RIP_U64(u64Target);
11544 IEM_MC_END()
11545 return VINF_SUCCESS;
11546
11547 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11548 }
11549 }
11550 else
11551 {
11552 /* The new RIP is taken from a register. */
11553 switch (pIemCpu->enmEffOpSize)
11554 {
11555 case IEMMODE_16BIT:
11556 IEM_MC_BEGIN(0, 2);
11557 IEM_MC_LOCAL(uint16_t, u16Target);
11558 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11559 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11560 IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11561 IEM_MC_SET_RIP_U16(u16Target);
11562 IEM_MC_END()
11563 return VINF_SUCCESS;
11564
11565 case IEMMODE_32BIT:
11566 IEM_MC_BEGIN(0, 2);
11567 IEM_MC_LOCAL(uint32_t, u32Target);
11568 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11569 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11570 IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11571 IEM_MC_SET_RIP_U32(u32Target);
11572 IEM_MC_END()
11573 return VINF_SUCCESS;
11574
11575 case IEMMODE_64BIT:
11576 IEM_MC_BEGIN(0, 2);
11577 IEM_MC_LOCAL(uint32_t, u32Target);
11578 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11579 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11580 IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11581 IEM_MC_SET_RIP_U32(u32Target);
11582 IEM_MC_END()
11583 return VINF_SUCCESS;
11584
11585 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11586 }
11587 }
11588}
11589
11590
11591/**
11592 * Opcode 0xff /5.
11593 * @param bRm The RM byte.
11594 */
11595FNIEMOP_DEF_1(iemOp_Grp5_jmpf_Ep, uint8_t, bRm)
11596{
11597 IEMOP_MNEMONIC("jmp Ep");
11598 IEMOP_HLP_NO_64BIT();
11599 /** @todo could share all the decoding with iemOp_Grp5_callf_Ep. */
11600
11601 /* Decode the far pointer address and pass it on to the far call C
11602 implementation. */
11603 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11604 {
11605 /** @todo How the heck does a 'callf eax' work? Probably just have to
11606 * search the docs... */
11607 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
11608 }
11609
11610 /* Far pointer loaded from memory. */
11611 switch (pIemCpu->enmEffOpSize)
11612 {
11613 case IEMMODE_16BIT:
11614 IEM_MC_BEGIN(3, 1);
11615 IEM_MC_ARG(uint16_t, u16Sel, 0);
11616 IEM_MC_ARG(uint16_t, offSeg, 1);
11617 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
11618 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11619 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11620 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11621 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 2);
11622 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
11623 IEM_MC_END();
11624 return VINF_SUCCESS;
11625
11626 case IEMMODE_32BIT:
11627 IEM_MC_BEGIN(3, 1);
11628 IEM_MC_ARG(uint16_t, u16Sel, 0);
11629 IEM_MC_ARG(uint32_t, offSeg, 1);
11630 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_32BIT, 2);
11631 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11632 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11633 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11634 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 4);
11635 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
11636 IEM_MC_END();
11637 return VINF_SUCCESS;
11638
11639 case IEMMODE_64BIT:
11640 IEM_MC_BEGIN(3, 1);
11641 IEM_MC_ARG(uint16_t, u16Sel, 0);
11642 IEM_MC_ARG(uint64_t, offSeg, 1);
11643 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
11644 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11645 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11646 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11647 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 8);
11648 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
11649 IEM_MC_END();
11650 return VINF_SUCCESS;
11651
11652 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11653 }
11654}
11655
11656
/**
 * Opcode 0xff /6 - PUSH with a register or memory operand (Ev).
 *
 * @param   bRm     The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_push_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("push Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */

    /* Registers are handled by a common worker. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return FNIEMOP_CALL_1(iemOpCommonPushGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);

    /* Memory we do here. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();  /* In 64-bit mode the operand size defaults to 64-bit. */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint16_t, u16Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
            IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U16(u16Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
            IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U32(u32Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
            IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U64(u64Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
11710
11711
/** Opcode 0xff - Group 5: INC/DEC/CALL/JMP/PUSH, selected by the reg field. */
FNIEMOP_DEF(iemOp_Grp5)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            IEMOP_MNEMONIC("inc Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_inc);
        case 1:
            IEMOP_MNEMONIC("dec Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_dec);
        case 2:
            return FNIEMOP_CALL_1(iemOp_Grp5_calln_Ev, bRm);
        case 3:
            return FNIEMOP_CALL_1(iemOp_Grp5_callf_Ep, bRm);
        case 4:
            return FNIEMOP_CALL_1(iemOp_Grp5_jmpn_Ev, bRm);
        case 5:
            return FNIEMOP_CALL_1(iemOp_Grp5_jmpf_Ep, bRm);
        case 6:
            return FNIEMOP_CALL_1(iemOp_Grp5_push_Ev, bRm);
        case 7:
            /* /7 has no valid encoding for opcode 0xff. */
            IEMOP_MNEMONIC("grp5-ud");
            return IEMOP_RAISE_INVALID_OPCODE();
    }
    /* Unreachable: the 3-bit reg field is fully covered above. */
    AssertFailedReturn(VERR_INTERNAL_ERROR_2);
}
11740
11741
11742
/**
 * The one-byte opcode decoder function table, indexed by the opcode byte.
 * Declared extern at the top of this file so it can be referenced before
 * this point.
 */
const PFNIEMOP g_apfnOneByteMap[256] =
{
    /* 0x00 */  iemOp_add_Eb_Gb,        iemOp_add_Ev_Gv,        iemOp_add_Gb_Eb,        iemOp_add_Gv_Ev,
    /* 0x04 */  iemOp_add_Al_Ib,        iemOp_add_eAX_Iz,       iemOp_push_ES,          iemOp_pop_ES,
    /* 0x08 */  iemOp_or_Eb_Gb,         iemOp_or_Ev_Gv,         iemOp_or_Gb_Eb,         iemOp_or_Gv_Ev,
    /* 0x0c */  iemOp_or_Al_Ib,         iemOp_or_eAX_Iz,        iemOp_push_CS,          iemOp_2byteEscape,
    /* 0x10 */  iemOp_adc_Eb_Gb,        iemOp_adc_Ev_Gv,        iemOp_adc_Gb_Eb,        iemOp_adc_Gv_Ev,
    /* 0x14 */  iemOp_adc_Al_Ib,        iemOp_adc_eAX_Iz,       iemOp_push_SS,          iemOp_pop_SS,
    /* 0x18 */  iemOp_sbb_Eb_Gb,        iemOp_sbb_Ev_Gv,        iemOp_sbb_Gb_Eb,        iemOp_sbb_Gv_Ev,
    /* 0x1c */  iemOp_sbb_Al_Ib,        iemOp_sbb_eAX_Iz,       iemOp_push_DS,          iemOp_pop_DS,
    /* 0x20 */  iemOp_and_Eb_Gb,        iemOp_and_Ev_Gv,        iemOp_and_Gb_Eb,        iemOp_and_Gv_Ev,
    /* 0x24 */  iemOp_and_Al_Ib,        iemOp_and_eAX_Iz,       iemOp_seg_ES,           iemOp_daa,
    /* 0x28 */  iemOp_sub_Eb_Gb,        iemOp_sub_Ev_Gv,        iemOp_sub_Gb_Eb,        iemOp_sub_Gv_Ev,
    /* 0x2c */  iemOp_sub_Al_Ib,        iemOp_sub_eAX_Iz,       iemOp_seg_CS,           iemOp_das,
    /* 0x30 */  iemOp_xor_Eb_Gb,        iemOp_xor_Ev_Gv,        iemOp_xor_Gb_Eb,        iemOp_xor_Gv_Ev,
    /* 0x34 */  iemOp_xor_Al_Ib,        iemOp_xor_eAX_Iz,       iemOp_seg_SS,           iemOp_aaa,
    /* 0x38 */  iemOp_cmp_Eb_Gb,        iemOp_cmp_Ev_Gv,        iemOp_cmp_Gb_Eb,        iemOp_cmp_Gv_Ev,
    /* 0x3c */  iemOp_cmp_Al_Ib,        iemOp_cmp_eAX_Iz,       iemOp_seg_DS,           iemOp_aas,
    /* 0x40 */  iemOp_inc_eAX,          iemOp_inc_eCX,          iemOp_inc_eDX,          iemOp_inc_eBX,
    /* 0x44 */  iemOp_inc_eSP,          iemOp_inc_eBP,          iemOp_inc_eSI,          iemOp_inc_eDI,
    /* 0x48 */  iemOp_dec_eAX,          iemOp_dec_eCX,          iemOp_dec_eDX,          iemOp_dec_eBX,
    /* 0x4c */  iemOp_dec_eSP,          iemOp_dec_eBP,          iemOp_dec_eSI,          iemOp_dec_eDI,
    /* 0x50 */  iemOp_push_eAX,         iemOp_push_eCX,         iemOp_push_eDX,         iemOp_push_eBX,
    /* 0x54 */  iemOp_push_eSP,         iemOp_push_eBP,         iemOp_push_eSI,         iemOp_push_eDI,
    /* 0x58 */  iemOp_pop_eAX,          iemOp_pop_eCX,          iemOp_pop_eDX,          iemOp_pop_eBX,
    /* 0x5c */  iemOp_pop_eSP,          iemOp_pop_eBP,          iemOp_pop_eSI,          iemOp_pop_eDI,
    /* 0x60 */  iemOp_pusha,            iemOp_popa,             iemOp_bound_Gv_Ma,      iemOp_arpl_Ew_Gw,
    /* 0x64 */  iemOp_seg_FS,           iemOp_seg_GS,           iemOp_op_size,          iemOp_addr_size,
    /* 0x68 */  iemOp_push_Iz,          iemOp_imul_Gv_Ev_Iz,    iemOp_push_Ib,          iemOp_imul_Gv_Ev_Ib,
    /* 0x6c */  iemOp_insb_Yb_DX,       iemOp_inswd_Yv_DX,      iemOp_outsb_Yb_DX,      iemOp_outswd_Yv_DX,
    /* 0x70 */  iemOp_jo_Jb,            iemOp_jno_Jb,           iemOp_jc_Jb,            iemOp_jnc_Jb,
    /* 0x74 */  iemOp_je_Jb,            iemOp_jne_Jb,           iemOp_jbe_Jb,           iemOp_jnbe_Jb,
    /* 0x78 */  iemOp_js_Jb,            iemOp_jns_Jb,           iemOp_jp_Jb,            iemOp_jnp_Jb,
    /* 0x7c */  iemOp_jl_Jb,            iemOp_jnl_Jb,           iemOp_jle_Jb,           iemOp_jnle_Jb,
    /* 0x80 */  iemOp_Grp1_Eb_Ib_80,    iemOp_Grp1_Ev_Iz,       iemOp_Grp1_Eb_Ib_82,    iemOp_Grp1_Ev_Ib,
    /* 0x84 */  iemOp_test_Eb_Gb,       iemOp_test_Ev_Gv,       iemOp_xchg_Eb_Gb,       iemOp_xchg_Ev_Gv,
    /* 0x88 */  iemOp_mov_Eb_Gb,        iemOp_mov_Ev_Gv,        iemOp_mov_Gb_Eb,        iemOp_mov_Gv_Ev,
    /* 0x8c */  iemOp_mov_Ev_Sw,        iemOp_lea_Gv_M,         iemOp_mov_Sw_Ev,        iemOp_pop_Ev,
    /* 0x90 */  iemOp_nop,              iemOp_xchg_eCX_eAX,     iemOp_xchg_eDX_eAX,     iemOp_xchg_eBX_eAX,
    /* 0x94 */  iemOp_xchg_eSP_eAX,     iemOp_xchg_eBP_eAX,     iemOp_xchg_eSI_eAX,     iemOp_xchg_eDI_eAX,
    /* 0x98 */  iemOp_cbw,              iemOp_cwd,              iemOp_call_Ap,          iemOp_wait,
    /* 0x9c */  iemOp_pushf_Fv,         iemOp_popf_Fv,          iemOp_sahf,             iemOp_lahf,
    /* 0xa0 */  iemOp_mov_Al_Ob,        iemOp_mov_rAX_Ov,       iemOp_mov_Ob_AL,        iemOp_mov_Ov_rAX,
    /* 0xa4 */  iemOp_movsb_Xb_Yb,      iemOp_movswd_Xv_Yv,     iemOp_cmpsb_Xb_Yb,      iemOp_cmpswd_Xv_Yv,
    /* 0xa8 */  iemOp_test_AL_Ib,       iemOp_test_eAX_Iz,      iemOp_stosb_Yb_AL,      iemOp_stoswd_Yv_eAX,
    /* 0xac */  iemOp_lodsb_AL_Xb,      iemOp_lodswd_eAX_Xv,    iemOp_scasb_AL_Xb,      iemOp_scaswd_eAX_Xv,
    /* 0xb0 */  iemOp_mov_AL_Ib,        iemOp_CL_Ib,            iemOp_DL_Ib,            iemOp_BL_Ib,
    /* 0xb4 */  iemOp_mov_AH_Ib,        iemOp_CH_Ib,            iemOp_DH_Ib,            iemOp_BH_Ib,
    /* 0xb8 */  iemOp_eAX_Iv,           iemOp_eCX_Iv,           iemOp_eDX_Iv,           iemOp_eBX_Iv,
    /* 0xbc */  iemOp_eSP_Iv,           iemOp_eBP_Iv,           iemOp_eSI_Iv,           iemOp_eDI_Iv,
    /* 0xc0 */  iemOp_Grp2_Eb_Ib,       iemOp_Grp2_Ev_Ib,       iemOp_retn_Iw,          iemOp_retn,
    /* 0xc4 */  iemOp_les_Gv_Mp,        iemOp_lds_Gv_Mp,        iemOp_Grp11_Eb_Ib,      iemOp_Grp11_Ev_Iz,
    /* 0xc8 */  iemOp_enter_Iw_Ib,      iemOp_leave,            iemOp_retf_Iw,          iemOp_retf,
    /* 0xcc */  iemOp_int_3,            iemOp_int_Ib,           iemOp_into,             iemOp_iret,
    /* 0xd0 */  iemOp_Grp2_Eb_1,        iemOp_Grp2_Ev_1,        iemOp_Grp2_Eb_CL,       iemOp_Grp2_Ev_CL,
    /* 0xd4 */  iemOp_aam_Ib,           iemOp_aad_Ib,           iemOp_Invalid,          iemOp_xlat,
    /* 0xd8 */  iemOp_EscF0,            iemOp_EscF1,            iemOp_EscF2,            iemOp_EscF3,
    /* 0xdc */  iemOp_EscF4,            iemOp_EscF5,            iemOp_EscF6,            iemOp_EscF7,
    /* 0xe0 */  iemOp_loopne_Jb,        iemOp_loope_Jb,         iemOp_loop_Jb,          iemOp_jecxz_Jb,
    /* 0xe4 */  iemOp_in_AL_Ib,         iemOp_in_eAX_Ib,        iemOp_out_Ib_AL,        iemOp_out_Ib_eAX,
    /* 0xe8 */  iemOp_call_Jv,          iemOp_jmp_Jv,           iemOp_jmp_Ap,           iemOp_jmp_Jb,
    /* 0xec */  iemOp_in_AL_DX,         iemOp_eAX_DX,           iemOp_out_DX_AL,        iemOp_out_DX_eAX,
    /* 0xf0 */  iemOp_lock,             iemOp_Invalid,          iemOp_repne,            iemOp_repe, /** @todo 0xf1 is INT1 / ICEBP. */
    /* 0xf4 */  iemOp_hlt,              iemOp_cmc,              iemOp_Grp3_Eb,          iemOp_Grp3_Ev,
    /* 0xf8 */  iemOp_clc,              iemOp_stc,              iemOp_cli,              iemOp_sti,
    /* 0xfc */  iemOp_cld,              iemOp_std,              iemOp_Grp4,             iemOp_Grp5,
};
11810
11811
11812/** @} */
11813
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette