VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h@40082

Last change on this file since 40082 was 40082, checked in by vboxsync, 13 years ago

More FPU instruction stubs.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 419.9 KB
/* $Id: IEMAllInstructions.cpp.h 40082 2012-02-12 13:40:29Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation.
 */

/*
 * Copyright (C) 2011 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */


/**
 * Common worker for instructions like ADD, AND, OR, ++ with a byte
 * memory/register as the destination.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_r8, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
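    /* ModRM layout, for reference: mod (bits 7:6), reg (bits 5:3) and
       r/m (bits 2:0); mod == 3 selects a register operand, anything
       else a memory operand. */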
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         * Note! We're putting the eflags on the stack here so we can commit them
         *       after the memory.
         */
        uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R; /* CMP,TEST */
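        /* Note! CMP and TEST never write the destination, so they have no
           locked variant; a NULL pfnLockedU8 is (presumably) the marker for
           that and gets us a read-only mapping below. */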
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,  pu8Dst,           0);
        IEM_MC_ARG(uint8_t,    u8Src,            1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_EFLAGS(EFlags);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
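
/*
 * Usage sketch (illustrative only, not part of the original text here): the
 * per-opcode handlers further down the file are expected to just forward to
 * these workers with the matching implementation table, e.g.:
 *
 *      FNIEMOP_DEF(iemOp_add_Eb_Gb)
 *      {
 *          IEMOP_MNEMONIC("add Eb,Gb");
 *          return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_add);
 *      }
 */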


/**
 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with
 * memory/register as the destination.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t,   u16Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t,   u32Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t,   u64Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're accessing memory.
         * Note! We're putting the eflags on the stack here so we can commit them
         *       after the memory.
         */
        uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R /* CMP,TEST */;
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst,          0);
                IEM_MC_ARG(uint16_t,   u16Src,           1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst,          0);
                IEM_MC_ARG(uint32_t,   u32Src,           1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst,          0);
                IEM_MC_ARG(uint64_t,   u64Src,           1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Common worker for byte instructions like ADD, AND, OR, ++ with a register as
 * the destination.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_r8_rm, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_FETCH_GREG_U8(u8Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         */
        IEM_MC_BEGIN(3, 1);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_MEM_U8(u8Src, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/**
 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with a
 * register as the destination.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t,   u16Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U16(u16Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t,   u32Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U32(u32Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t,   u64Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U64(u64Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're accessing memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t,   u16Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t,   u32Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t,   u64Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Common worker for instructions like ADD, AND, OR, ++ with working on AL with
 * a byte immediate.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_AL_Ib, PCIEMOPBINSIZES, pImpl)
{
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG(uint8_t *, pu8Dst, 0);
    IEM_MC_ARG_CONST(uint8_t, u8Src,/*=*/ u8Imm, 1);
    IEM_MC_ARG(uint32_t *, pEFlags, 2);

    IEM_MC_REF_GREG_U8(pu8Dst, X86_GREG_xAX);
    IEM_MC_REF_EFLAGS(pEFlags);
    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}


/**
 * Common worker for instructions like ADD, AND, OR, ++ with working on
 * AX/EAX/RAX with a word/dword immediate.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rAX_Iz, PCIEMOPBINSIZES, pImpl)
{
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint16_t *, pu16Dst, 0);
            IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm, 1);
            IEM_MC_ARG(uint32_t *, pEFlags, 2);

            IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint32_t *, pu32Dst, 0);
            IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm, 1);
            IEM_MC_ARG(uint32_t *, pEFlags, 2);

            IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
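            /* Note! Iz is at most a dword: with a 64-bit operand size the
               immediate is a 32-bit value sign-extended to 64 bits, matching
               real hardware. */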
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint64_t *, pu64Dst, 0);
            IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm, 1);
            IEM_MC_ARG(uint32_t *, pEFlags, 2);

            IEM_MC_REF_GREG_U64(pu64Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}

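/* Note! On real CPUs 0xf1 is the undocumented INT1/ICEBP and 0xd6 the
   undocumented SALC; IEM simply treats both as invalid opcodes here. */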
/** Opcodes 0xf1, 0xd6. */
FNIEMOP_DEF(iemOp_Invalid)
{
    IEMOP_MNEMONIC("Invalid");
    return IEMOP_RAISE_INVALID_OPCODE();
}



/** @name ..... opcodes.
 *
 * @{
 */

/** @} */


/** @name Two byte opcodes (first byte 0x0f).
 *
 * @{
 */

/** Opcode 0x0f 0x00 /0. */
FNIEMOP_STUB_1(iemOp_Grp6_sldt, uint8_t, bRm);


/** Opcode 0x0f 0x00 /1. */
FNIEMOP_STUB_1(iemOp_Grp6_str, uint8_t, bRm);


/** Opcode 0x0f 0x00 /2. */
FNIEMOP_DEF_1(iemOp_Grp6_lldt, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
        IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x00 /3. */
FNIEMOP_DEF_1(iemOp_Grp6_ltr, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
        IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x00 /4. */
FNIEMOP_STUB_1(iemOp_Grp6_verr, uint8_t, bRm);


/** Opcode 0x0f 0x00 /5. */
FNIEMOP_STUB_1(iemOp_Grp6_verw, uint8_t, bRm);


/** Opcode 0x0f 0x00. */
FNIEMOP_DEF(iemOp_Grp6)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: return FNIEMOP_CALL_1(iemOp_Grp6_sldt, bRm);
        case 1: return FNIEMOP_CALL_1(iemOp_Grp6_str,  bRm);
        case 2: return FNIEMOP_CALL_1(iemOp_Grp6_lldt, bRm);
        case 3: return FNIEMOP_CALL_1(iemOp_Grp6_ltr,  bRm);
        case 4: return FNIEMOP_CALL_1(iemOp_Grp6_verr, bRm);
        case 5: return FNIEMOP_CALL_1(iemOp_Grp6_verw, bRm);
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        case 7: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}


/** Opcode 0x0f 0x01 /0. */
FNIEMOP_DEF_1(iemOp_Grp7_sgdt, uint8_t, bRm)
{
    NOREF(pIemCpu); NOREF(bRm);
    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /0. */
FNIEMOP_DEF(iemOp_Grp7_vmcall)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /0. */
FNIEMOP_DEF(iemOp_Grp7_vmlaunch)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /0. */
FNIEMOP_DEF(iemOp_Grp7_vmresume)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /0. */
FNIEMOP_DEF(iemOp_Grp7_vmxoff)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /1. */
FNIEMOP_DEF_1(iemOp_Grp7_sidt, uint8_t, bRm)
{
    NOREF(pIemCpu); NOREF(bRm);
    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /1. */
FNIEMOP_DEF(iemOp_Grp7_monitor)
{
    NOREF(pIemCpu);
    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /1. */
FNIEMOP_DEF(iemOp_Grp7_mwait)
{
    NOREF(pIemCpu);
    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /2. */
FNIEMOP_DEF_1(iemOp_Grp7_lgdt, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

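    /* Note! In long mode the descriptor-table base is always 64 bits wide,
       so the effective operand size is forced to 64-bit regardless of any
       operand-size prefix. */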
    IEMMODE enmEffOpSize = pIemCpu->enmCpuMode == IEMMODE_64BIT
                         ? IEMMODE_64BIT
                         : pIemCpu->enmEffOpSize;
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0);
    IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/enmEffOpSize, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
    IEM_MC_CALL_CIMPL_3(iemCImpl_lgdt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x01 /2. */
FNIEMOP_DEF(iemOp_Grp7_xgetbv)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /2. */
FNIEMOP_DEF(iemOp_Grp7_xsetbv)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /3. */
FNIEMOP_DEF_1(iemOp_Grp7_lidt, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEMMODE enmEffOpSize = pIemCpu->enmCpuMode == IEMMODE_64BIT
                         ? IEMMODE_64BIT
                         : pIemCpu->enmEffOpSize;
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0);
    IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/enmEffOpSize, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
    IEM_MC_CALL_CIMPL_3(iemCImpl_lidt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x01 /4. */
FNIEMOP_DEF_1(iemOp_Grp7_smsw, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Tmp);
                IEM_MC_FETCH_CR0_U16(u16Tmp);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Tmp);
                IEM_MC_FETCH_CR0_U32(u32Tmp);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_FETCH_CR0_U64(u64Tmp);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Ignore operand size here, memory refs are always 16-bit. */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Tmp);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_CR0_U16(u16Tmp);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tmp);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
        return VINF_SUCCESS;
    }
}


/** Opcode 0x0f 0x01 /6. */
FNIEMOP_DEF_1(iemOp_Grp7_lmsw, uint8_t, bRm)
{
    /* The operand size is effectively ignored, all is 16-bit and only the
       lower 4 bits are used. */
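    /* Note! Architecturally LMSW can set but never clear CR0.PE; enforcing
       that is assumed to be iemCImpl_lmsw's job, it isn't visible here. */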
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Tmp, 0);
        IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Tmp, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x01 /7. */
FNIEMOP_DEF_1(iemOp_Grp7_invlpg, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(1, 1);
    IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 0);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
    IEM_MC_CALL_CIMPL_1(iemCImpl_invlpg, GCPtrEffDst);
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x01 /7. */
FNIEMOP_DEF(iemOp_Grp7_swapgs)
{
    NOREF(pIemCpu);
    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /7. */
FNIEMOP_DEF(iemOp_Grp7_rdtscp)
{
    NOREF(pIemCpu);
    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01. */
FNIEMOP_DEF(iemOp_Grp7)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_sgdt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 1: return FNIEMOP_CALL(iemOp_Grp7_vmcall);
                case 2: return FNIEMOP_CALL(iemOp_Grp7_vmlaunch);
                case 3: return FNIEMOP_CALL(iemOp_Grp7_vmresume);
                case 4: return FNIEMOP_CALL(iemOp_Grp7_vmxoff);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 1:
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_sidt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_monitor);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_mwait);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 2:
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_lgdt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_xgetbv);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_xsetbv);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 3:
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_lidt, bRm);
            return IEMOP_RAISE_INVALID_OPCODE();

        case 4:
            return FNIEMOP_CALL_1(iemOp_Grp7_smsw, bRm);

        case 5:
            return IEMOP_RAISE_INVALID_OPCODE();

        case 6:
            return FNIEMOP_CALL_1(iemOp_Grp7_lmsw, bRm);

        case 7:
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_invlpg, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_swapgs);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_rdtscp);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}

/** Opcode 0x0f 0x02. */
FNIEMOP_STUB(iemOp_lar_Gv_Ew);
/** Opcode 0x0f 0x03. */
FNIEMOP_STUB(iemOp_lsl_Gv_Ew);
/** Opcode 0x0f 0x05. */
FNIEMOP_STUB(iemOp_syscall);


/** Opcode 0x0f 0x06. */
FNIEMOP_DEF(iemOp_clts)
{
    IEMOP_MNEMONIC("clts");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clts);
}


/** Opcode 0x0f 0x07. */
FNIEMOP_STUB(iemOp_sysret);
/** Opcode 0x0f 0x08. */
FNIEMOP_STUB(iemOp_invd);
/** Opcode 0x0f 0x09. */
FNIEMOP_STUB(iemOp_wbinvd);
/** Opcode 0x0f 0x0b. */
FNIEMOP_STUB(iemOp_ud2);

/** Opcode 0x0f 0x0d. */
FNIEMOP_DEF(iemOp_nop_Ev_GrpP)
{
    /* AMD prefetch group, Intel implements this as NOP Ev (and so do we). */
    if (!IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(X86_CPUID_AMD_FEATURE_EDX_LONG_MODE | X86_CPUID_AMD_FEATURE_EDX_3DNOW,
                                               X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF))
    {
        IEMOP_MNEMONIC("GrpP");
        return IEMOP_RAISE_INVALID_OPCODE();
    }

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_MNEMONIC("GrpP");
        return IEMOP_RAISE_INVALID_OPCODE();
    }

    IEMOP_HLP_NO_LOCK_PREFIX();
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 2: /* Aliased to /0 for the time being. */
        case 4: /* Aliased to /0 for the time being. */
        case 5: /* Aliased to /0 for the time being. */
        case 6: /* Aliased to /0 for the time being. */
        case 7: /* Aliased to /0 for the time being. */
        case 0: IEMOP_MNEMONIC("prefetch"); break;
        case 1: IEMOP_MNEMONIC("prefetchw"); break;
        case 3: IEMOP_MNEMONIC("prefetchw"); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
    /* Currently a NOP. */
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x0e. */
FNIEMOP_STUB(iemOp_femms);
/** Opcode 0x0f 0x0f. */
FNIEMOP_STUB(iemOp_3Dnow);
/** Opcode 0x0f 0x10. */
FNIEMOP_STUB(iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd);
/** Opcode 0x0f 0x11. */
FNIEMOP_STUB(iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd);
/** Opcode 0x0f 0x12. */
FNIEMOP_STUB(iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq);
/** Opcode 0x0f 0x13. */
FNIEMOP_STUB(iemOp_movlps_Mq_Vq__movlpd_Mq_Vq);
/** Opcode 0x0f 0x14. */
FNIEMOP_STUB(iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq);
/** Opcode 0x0f 0x15. */
FNIEMOP_STUB(iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq);
/** Opcode 0x0f 0x16. */
FNIEMOP_STUB(iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq);
/** Opcode 0x0f 0x17. */
FNIEMOP_STUB(iemOp_movhps_Mq_Vq__movhpd_Mq_Vq);


/** Opcode 0x0f 0x18. */
FNIEMOP_DEF(iemOp_prefetch_Grp16)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 4: /* Aliased to /0 for the time being according to AMD. */
            case 5: /* Aliased to /0 for the time being according to AMD. */
            case 6: /* Aliased to /0 for the time being according to AMD. */
            case 7: /* Aliased to /0 for the time being according to AMD. */
            case 0: IEMOP_MNEMONIC("prefetchNTA m8"); break;
            case 1: IEMOP_MNEMONIC("prefetchT0 m8"); break;
            case 2: IEMOP_MNEMONIC("prefetchT1 m8"); break;
            case 3: IEMOP_MNEMONIC("prefetchT2 m8"); break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }

        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
        /* Currently a NOP. */
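        /* Note! Prefetch hints are advisory only, so decoding the address
           and then doing nothing is architecturally sound. */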
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
        return VINF_SUCCESS;
    }

    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x19..0x1f. */
FNIEMOP_DEF(iemOp_nop_Ev)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(0, 0);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
        /* Currently a NOP. */
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x20. */
FNIEMOP_DEF(iemOp_mov_Rd_Cd)
{
    /* mod is ignored, as are operand-size overrides. */
    IEMOP_MNEMONIC("mov Rd,Cd");
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
    else
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;

    /** @todo Verify that the invalid lock sequence exception (\#UD) is raised
     *        before the privilege level violation (\#GP). */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
    {
        /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
        if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
            return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        iCrReg |= 8;
    }
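    /* Only CR0, CR2, CR3, CR4 and CR8 actually exist; any other control
       register number must raise #UD. */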
    switch (iCrReg)
    {
        case 0: case 2: case 3: case 4: case 8:
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }

    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Cd, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB, iCrReg);
}


/** Opcode 0x0f 0x21. */
FNIEMOP_DEF(iemOp_mov_Rd_Dd)
{
    IEMOP_MNEMONIC("mov Rd,Dd");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
        return IEMOP_RAISE_INVALID_OPCODE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Dd,
                                   (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB,
                                   ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK));
}


/** Opcode 0x0f 0x22. */
FNIEMOP_DEF(iemOp_mov_Cd_Rd)
{
    /* mod is ignored, as are operand-size overrides. */
    IEMOP_MNEMONIC("mov Cd,Rd");
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
    else
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;

    /** @todo Verify that the invalid lock sequence exception (\#UD) is raised
     *        before the privilege level violation (\#GP). */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
    {
        /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
        if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
            return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        iCrReg |= 8;
    }
    switch (iCrReg)
    {
        case 0: case 2: case 3: case 4: case 8:
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }

    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Cd_Rd, iCrReg, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}


/** Opcode 0x0f 0x23. */
FNIEMOP_DEF(iemOp_mov_Dd_Rd)
{
    IEMOP_MNEMONIC("mov Dd,Rd");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
        return IEMOP_RAISE_INVALID_OPCODE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Dd_Rd,
                                   ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK),
                                   (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}


/** Opcode 0x0f 0x24. */
FNIEMOP_DEF(iemOp_mov_Rd_Td)
{
    IEMOP_MNEMONIC("mov Rd,Td");
/** @todo Is the invalid opcode raised before parsing any R/M byte? */
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x26. */
FNIEMOP_DEF(iemOp_mov_Td_Rd)
{
    IEMOP_MNEMONIC("mov Td,Rd");
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x28. */
FNIEMOP_STUB(iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd);
/** Opcode 0x0f 0x29. */
FNIEMOP_STUB(iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd);
/** Opcode 0x0f 0x2a. */
FNIEMOP_STUB(iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey);
/** Opcode 0x0f 0x2b. */
FNIEMOP_STUB(iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd);
/** Opcode 0x0f 0x2c. */
FNIEMOP_STUB(iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd);
/** Opcode 0x0f 0x2d. */
FNIEMOP_STUB(iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd);
/** Opcode 0x0f 0x2e. */
FNIEMOP_STUB(iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd);
/** Opcode 0x0f 0x2f. */
FNIEMOP_STUB(iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd);
/** Opcode 0x0f 0x30. */
FNIEMOP_STUB(iemOp_wrmsr);


/** Opcode 0x0f 0x31. */
FNIEMOP_DEF(iemOp_rdtsc)
{
    IEMOP_MNEMONIC("rdtsc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdtsc);
}


/** Opcode 0x0f 0x32. */
FNIEMOP_DEF(iemOp_rdmsr)
{
    IEMOP_MNEMONIC("rdmsr");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdmsr);
}


/** Opcode 0x0f 0x33. */
FNIEMOP_STUB(iemOp_rdpmc);
/** Opcode 0x0f 0x34. */
FNIEMOP_STUB(iemOp_sysenter);
/** Opcode 0x0f 0x35. */
FNIEMOP_STUB(iemOp_sysexit);
/** Opcode 0x0f 0x37. */
FNIEMOP_STUB(iemOp_getsec);
/** Opcode 0x0f 0x38. */
FNIEMOP_STUB(iemOp_3byte_Esc_A4);
/** Opcode 0x0f 0x39. */
FNIEMOP_STUB(iemOp_3byte_Esc_A5);
/** Opcode 0x0f 0x3c (?). */
FNIEMOP_STUB(iemOp_movnti_Gv_Ev);

/**
 * Implements a conditional move.
 *
 * Wish there were an obvious way to do this where we could share and reduce
 * code bloat.
 *
 * @param   a_Cnd       The conditional "microcode" operation.
 */
#define CMOV_X(a_Cnd) \
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); \
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) \
    { \
        switch (pIemCpu->enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint16_t, u16Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
\
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint32_t, u32Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
                } IEM_MC_ELSE() { \
                    IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
\
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint64_t, u64Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
\
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } \
    else \
    { \
        switch (pIemCpu->enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint16_t, u16Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
                IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
\
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint32_t, u32Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
                IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
                } IEM_MC_ELSE() { \
                    IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
\
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint64_t, u64Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
\
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } do {} while (0)
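
/* Note! The IEM_MC_ELSE branches in the 32-bit cases above match real
   hardware: in 64-bit mode a CMOVcc with 32-bit operand size zero-extends
   the destination register even when the condition is false. */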


/** Opcode 0x0f 0x40. */
FNIEMOP_DEF(iemOp_cmovo_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovo Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF));
}


/** Opcode 0x0f 0x41. */
FNIEMOP_DEF(iemOp_cmovno_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovno Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_OF));
}


/** Opcode 0x0f 0x42. */
FNIEMOP_DEF(iemOp_cmovc_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovc Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF));
}


/** Opcode 0x0f 0x43. */
FNIEMOP_DEF(iemOp_cmovnc_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnc Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_CF));
}


/** Opcode 0x0f 0x44. */
FNIEMOP_DEF(iemOp_cmove_Gv_Ev)
{
    IEMOP_MNEMONIC("cmove Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF));
}


/** Opcode 0x0f 0x45. */
FNIEMOP_DEF(iemOp_cmovne_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovne Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF));
}


/** Opcode 0x0f 0x46. */
FNIEMOP_DEF(iemOp_cmovbe_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovbe Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF));
}


/** Opcode 0x0f 0x47. */
FNIEMOP_DEF(iemOp_cmovnbe_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnbe Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_NO_BITS_SET(X86_EFL_CF | X86_EFL_ZF));
}


/** Opcode 0x0f 0x48. */
FNIEMOP_DEF(iemOp_cmovs_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovs Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF));
}


/** Opcode 0x0f 0x49. */
FNIEMOP_DEF(iemOp_cmovns_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovns Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_SF));
}


/** Opcode 0x0f 0x4a. */
FNIEMOP_DEF(iemOp_cmovp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovp Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF));
}


/** Opcode 0x0f 0x4b. */
FNIEMOP_DEF(iemOp_cmovnp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnp Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_PF));
}


/** Opcode 0x0f 0x4c. */
FNIEMOP_DEF(iemOp_cmovl_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovl Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4d. */
FNIEMOP_DEF(iemOp_cmovnl_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnl Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BITS_EQ(X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4e. */
FNIEMOP_DEF(iemOp_cmovle_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovle Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4f. */
FNIEMOP_DEF(iemOp_cmovnle_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnle Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF));
}

#undef CMOV_X

/** Opcode 0x0f 0x50. */
FNIEMOP_STUB(iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd);
/** Opcode 0x0f 0x51. */
FNIEMOP_STUB(iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd);
/** Opcode 0x0f 0x52. */
FNIEMOP_STUB(iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss);
/** Opcode 0x0f 0x53. */
FNIEMOP_STUB(iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss);
/** Opcode 0x0f 0x54. */
FNIEMOP_STUB(iemOp_andps_Vps_Wps__andpd_Wpd_Vpd);
/** Opcode 0x0f 0x55. */
FNIEMOP_STUB(iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd);
/** Opcode 0x0f 0x56. */
FNIEMOP_STUB(iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd);
/** Opcode 0x0f 0x57. */
FNIEMOP_STUB(iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd);
/** Opcode 0x0f 0x58. */
FNIEMOP_STUB(iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd);
/** Opcode 0x0f 0x59. */
FNIEMOP_STUB(iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd);
/** Opcode 0x0f 0x5a. */
FNIEMOP_STUB(iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd);
/** Opcode 0x0f 0x5b. */
FNIEMOP_STUB(iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps);
/** Opcode 0x0f 0x5c. */
FNIEMOP_STUB(iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd);
/** Opcode 0x0f 0x5d. */
FNIEMOP_STUB(iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd);
/** Opcode 0x0f 0x5e. */
FNIEMOP_STUB(iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd);
/** Opcode 0x0f 0x5f. */
FNIEMOP_STUB(iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd);
/** Opcode 0x0f 0x60. */
FNIEMOP_STUB(iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq);
/** Opcode 0x0f 0x61. */
FNIEMOP_STUB(iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq);
/** Opcode 0x0f 0x62. */
FNIEMOP_STUB(iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq);
/** Opcode 0x0f 0x63. */
FNIEMOP_STUB(iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq);
/** Opcode 0x0f 0x64. */
FNIEMOP_STUB(iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq);
/** Opcode 0x0f 0x65. */
FNIEMOP_STUB(iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq);
/** Opcode 0x0f 0x66. */
FNIEMOP_STUB(iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq);
/** Opcode 0x0f 0x67. */
FNIEMOP_STUB(iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq);
/** Opcode 0x0f 0x68. */
FNIEMOP_STUB(iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq);
/** Opcode 0x0f 0x69. */
FNIEMOP_STUB(iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq);
/** Opcode 0x0f 0x6a. */
FNIEMOP_STUB(iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq);
/** Opcode 0x0f 0x6b. */
FNIEMOP_STUB(iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq);
/** Opcode 0x0f 0x6c. */
FNIEMOP_STUB(iemOp_punpcklqdq_Vdq_Wdq);
/** Opcode 0x0f 0x6d. */
FNIEMOP_STUB(iemOp_punpckhqdq_Vdq_Wdq);
/** Opcode 0x0f 0x6e. */
FNIEMOP_STUB(iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey);
/** Opcode 0x0f 0x6f. */
FNIEMOP_STUB(iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq);
/** Opcode 0x0f 0x70. */
FNIEMOP_STUB(iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib);
/** Opcode 0x0f 0x71. */
FNIEMOP_STUB(iemOp_Grp12);
/** Opcode 0x0f 0x72. */
FNIEMOP_STUB(iemOp_Grp13);
/** Opcode 0x0f 0x73. */
FNIEMOP_STUB(iemOp_Grp14);
/** Opcode 0x0f 0x74. */
FNIEMOP_STUB(iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq);
/** Opcode 0x0f 0x75. */
FNIEMOP_STUB(iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq);
/** Opcode 0x0f 0x76. */
FNIEMOP_STUB(iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq);
/** Opcode 0x0f 0x77. */
FNIEMOP_STUB(iemOp_emms);
/** Opcode 0x0f 0x78. */
FNIEMOP_STUB(iemOp_vmread);
/** Opcode 0x0f 0x79. */
FNIEMOP_STUB(iemOp_vmwrite);
/** Opcode 0x0f 0x7c. */
FNIEMOP_STUB(iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps);
/** Opcode 0x0f 0x7d. */
FNIEMOP_STUB(iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps);
/** Opcode 0x0f 0x7e. */
FNIEMOP_STUB(iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq);
/** Opcode 0x0f 0x7f. */
FNIEMOP_STUB(iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq);


/** Opcode 0x0f 0x80. */
FNIEMOP_DEF(iemOp_jo_Jv)
{
    IEMOP_MNEMONIC("jo Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
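    /* Note! The Jv displacement is relative to the end of the instruction,
       i.e. to RIP of the following instruction; the IEM_MC_REL_JMP_S16/S32
       ops below apply it accordingly. */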
1562 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1563 {
1564 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1565 IEMOP_HLP_NO_LOCK_PREFIX();
1566
1567 IEM_MC_BEGIN(0, 0);
1568 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
1569 IEM_MC_REL_JMP_S16(i16Imm);
1570 } IEM_MC_ELSE() {
1571 IEM_MC_ADVANCE_RIP();
1572 } IEM_MC_ENDIF();
1573 IEM_MC_END();
1574 }
1575 else
1576 {
1577 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1578 IEMOP_HLP_NO_LOCK_PREFIX();
1579
1580 IEM_MC_BEGIN(0, 0);
1581 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
1582 IEM_MC_REL_JMP_S32(i32Imm);
1583 } IEM_MC_ELSE() {
1584 IEM_MC_ADVANCE_RIP();
1585 } IEM_MC_ENDIF();
1586 IEM_MC_END();
1587 }
1588 return VINF_SUCCESS;
1589}
1590
1591
1592/** Opcode 0x0f 0x81. */
1593FNIEMOP_DEF(iemOp_jno_Jv)
1594{
1595 IEMOP_MNEMONIC("jno Jv");
1596 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1597 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1598 {
1599 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1600 IEMOP_HLP_NO_LOCK_PREFIX();
1601
1602 IEM_MC_BEGIN(0, 0);
1603 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
1604 IEM_MC_ADVANCE_RIP();
1605 } IEM_MC_ELSE() {
1606 IEM_MC_REL_JMP_S16(i16Imm);
1607 } IEM_MC_ENDIF();
1608 IEM_MC_END();
1609 }
1610 else
1611 {
1612 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1613 IEMOP_HLP_NO_LOCK_PREFIX();
1614
1615 IEM_MC_BEGIN(0, 0);
1616 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
1617 IEM_MC_ADVANCE_RIP();
1618 } IEM_MC_ELSE() {
1619 IEM_MC_REL_JMP_S32(i32Imm);
1620 } IEM_MC_ENDIF();
1621 IEM_MC_END();
1622 }
1623 return VINF_SUCCESS;
1624}
1625
1626
1627/** Opcode 0x0f 0x82. */
1628FNIEMOP_DEF(iemOp_jc_Jv)
1629{
1630 IEMOP_MNEMONIC("jc/jb/jnae Jv");
1631 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1632 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1633 {
1634 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1635 IEMOP_HLP_NO_LOCK_PREFIX();
1636
1637 IEM_MC_BEGIN(0, 0);
1638 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
1639 IEM_MC_REL_JMP_S16(i16Imm);
1640 } IEM_MC_ELSE() {
1641 IEM_MC_ADVANCE_RIP();
1642 } IEM_MC_ENDIF();
1643 IEM_MC_END();
1644 }
1645 else
1646 {
1647 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1648 IEMOP_HLP_NO_LOCK_PREFIX();
1649
1650 IEM_MC_BEGIN(0, 0);
1651 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
1652 IEM_MC_REL_JMP_S32(i32Imm);
1653 } IEM_MC_ELSE() {
1654 IEM_MC_ADVANCE_RIP();
1655 } IEM_MC_ENDIF();
1656 IEM_MC_END();
1657 }
1658 return VINF_SUCCESS;
1659}
1660
1661
1662/** Opcode 0x0f 0x83. */
1663FNIEMOP_DEF(iemOp_jnc_Jv)
1664{
1665 IEMOP_MNEMONIC("jnc/jnb/jae Jv");
1666 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1667 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1668 {
1669 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1670 IEMOP_HLP_NO_LOCK_PREFIX();
1671
1672 IEM_MC_BEGIN(0, 0);
1673 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
1674 IEM_MC_ADVANCE_RIP();
1675 } IEM_MC_ELSE() {
1676 IEM_MC_REL_JMP_S16(i16Imm);
1677 } IEM_MC_ENDIF();
1678 IEM_MC_END();
1679 }
1680 else
1681 {
1682 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1683 IEMOP_HLP_NO_LOCK_PREFIX();
1684
1685 IEM_MC_BEGIN(0, 0);
1686 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
1687 IEM_MC_ADVANCE_RIP();
1688 } IEM_MC_ELSE() {
1689 IEM_MC_REL_JMP_S32(i32Imm);
1690 } IEM_MC_ENDIF();
1691 IEM_MC_END();
1692 }
1693 return VINF_SUCCESS;
1694}
1695
1696
1697/** Opcode 0x0f 0x84. */
1698FNIEMOP_DEF(iemOp_je_Jv)
1699{
1700 IEMOP_MNEMONIC("je/jz Jv");
1701 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1702 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1703 {
1704 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1705 IEMOP_HLP_NO_LOCK_PREFIX();
1706
1707 IEM_MC_BEGIN(0, 0);
1708 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
1709 IEM_MC_REL_JMP_S16(i16Imm);
1710 } IEM_MC_ELSE() {
1711 IEM_MC_ADVANCE_RIP();
1712 } IEM_MC_ENDIF();
1713 IEM_MC_END();
1714 }
1715 else
1716 {
1717 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1718 IEMOP_HLP_NO_LOCK_PREFIX();
1719
1720 IEM_MC_BEGIN(0, 0);
1721 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
1722 IEM_MC_REL_JMP_S32(i32Imm);
1723 } IEM_MC_ELSE() {
1724 IEM_MC_ADVANCE_RIP();
1725 } IEM_MC_ENDIF();
1726 IEM_MC_END();
1727 }
1728 return VINF_SUCCESS;
1729}
1730
1731
1732/** Opcode 0x0f 0x85. */
1733FNIEMOP_DEF(iemOp_jne_Jv)
1734{
1735 IEMOP_MNEMONIC("jne/jnz Jv");
1736 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1737 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1738 {
1739 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1740 IEMOP_HLP_NO_LOCK_PREFIX();
1741
1742 IEM_MC_BEGIN(0, 0);
1743 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
1744 IEM_MC_ADVANCE_RIP();
1745 } IEM_MC_ELSE() {
1746 IEM_MC_REL_JMP_S16(i16Imm);
1747 } IEM_MC_ENDIF();
1748 IEM_MC_END();
1749 }
1750 else
1751 {
1752 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1753 IEMOP_HLP_NO_LOCK_PREFIX();
1754
1755 IEM_MC_BEGIN(0, 0);
1756 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
1757 IEM_MC_ADVANCE_RIP();
1758 } IEM_MC_ELSE() {
1759 IEM_MC_REL_JMP_S32(i32Imm);
1760 } IEM_MC_ENDIF();
1761 IEM_MC_END();
1762 }
1763 return VINF_SUCCESS;
1764}
1765
1766
1767/** Opcode 0x0f 0x86. */
1768FNIEMOP_DEF(iemOp_jbe_Jv)
1769{
1770 IEMOP_MNEMONIC("jbe/jna Jv");
1771 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1772 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1773 {
1774 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1775 IEMOP_HLP_NO_LOCK_PREFIX();
1776
1777 IEM_MC_BEGIN(0, 0);
1778 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
1779 IEM_MC_REL_JMP_S16(i16Imm);
1780 } IEM_MC_ELSE() {
1781 IEM_MC_ADVANCE_RIP();
1782 } IEM_MC_ENDIF();
1783 IEM_MC_END();
1784 }
1785 else
1786 {
1787 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1788 IEMOP_HLP_NO_LOCK_PREFIX();
1789
1790 IEM_MC_BEGIN(0, 0);
1791 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
1792 IEM_MC_REL_JMP_S32(i32Imm);
1793 } IEM_MC_ELSE() {
1794 IEM_MC_ADVANCE_RIP();
1795 } IEM_MC_ENDIF();
1796 IEM_MC_END();
1797 }
1798 return VINF_SUCCESS;
1799}
1800
1801
1802/** Opcode 0x0f 0x87. */
1803FNIEMOP_DEF(iemOp_jnbe_Jv)
1804{
1805 IEMOP_MNEMONIC("jnbe/ja Jv");
1806 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1807 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1808 {
1809 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1810 IEMOP_HLP_NO_LOCK_PREFIX();
1811
1812 IEM_MC_BEGIN(0, 0);
1813 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
1814 IEM_MC_ADVANCE_RIP();
1815 } IEM_MC_ELSE() {
1816 IEM_MC_REL_JMP_S16(i16Imm);
1817 } IEM_MC_ENDIF();
1818 IEM_MC_END();
1819 }
1820 else
1821 {
1822 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1823 IEMOP_HLP_NO_LOCK_PREFIX();
1824
1825 IEM_MC_BEGIN(0, 0);
1826 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
1827 IEM_MC_ADVANCE_RIP();
1828 } IEM_MC_ELSE() {
1829 IEM_MC_REL_JMP_S32(i32Imm);
1830 } IEM_MC_ENDIF();
1831 IEM_MC_END();
1832 }
1833 return VINF_SUCCESS;
1834}
1835
1836
1837/** Opcode 0x0f 0x88. */
1838FNIEMOP_DEF(iemOp_js_Jv)
1839{
1840 IEMOP_MNEMONIC("js Jv");
1841 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1842 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1843 {
1844 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1845 IEMOP_HLP_NO_LOCK_PREFIX();
1846
1847 IEM_MC_BEGIN(0, 0);
1848 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
1849 IEM_MC_REL_JMP_S16(i16Imm);
1850 } IEM_MC_ELSE() {
1851 IEM_MC_ADVANCE_RIP();
1852 } IEM_MC_ENDIF();
1853 IEM_MC_END();
1854 }
1855 else
1856 {
1857 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1858 IEMOP_HLP_NO_LOCK_PREFIX();
1859
1860 IEM_MC_BEGIN(0, 0);
1861 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
1862 IEM_MC_REL_JMP_S32(i32Imm);
1863 } IEM_MC_ELSE() {
1864 IEM_MC_ADVANCE_RIP();
1865 } IEM_MC_ENDIF();
1866 IEM_MC_END();
1867 }
1868 return VINF_SUCCESS;
1869}
1870
1871
1872/** Opcode 0x0f 0x89. */
1873FNIEMOP_DEF(iemOp_jns_Jv)
1874{
1875 IEMOP_MNEMONIC("jns Jv");
1876 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1877 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1878 {
1879 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1880 IEMOP_HLP_NO_LOCK_PREFIX();
1881
1882 IEM_MC_BEGIN(0, 0);
1883 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
1884 IEM_MC_ADVANCE_RIP();
1885 } IEM_MC_ELSE() {
1886 IEM_MC_REL_JMP_S16(i16Imm);
1887 } IEM_MC_ENDIF();
1888 IEM_MC_END();
1889 }
1890 else
1891 {
1892 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1893 IEMOP_HLP_NO_LOCK_PREFIX();
1894
1895 IEM_MC_BEGIN(0, 0);
1896 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
1897 IEM_MC_ADVANCE_RIP();
1898 } IEM_MC_ELSE() {
1899 IEM_MC_REL_JMP_S32(i32Imm);
1900 } IEM_MC_ENDIF();
1901 IEM_MC_END();
1902 }
1903 return VINF_SUCCESS;
1904}
1905
1906
1907/** Opcode 0x0f 0x8a. */
1908FNIEMOP_DEF(iemOp_jp_Jv)
1909{
1910 IEMOP_MNEMONIC("jp Jv");
1911 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1912 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1913 {
1914 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1915 IEMOP_HLP_NO_LOCK_PREFIX();
1916
1917 IEM_MC_BEGIN(0, 0);
1918 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
1919 IEM_MC_REL_JMP_S16(i16Imm);
1920 } IEM_MC_ELSE() {
1921 IEM_MC_ADVANCE_RIP();
1922 } IEM_MC_ENDIF();
1923 IEM_MC_END();
1924 }
1925 else
1926 {
1927 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1928 IEMOP_HLP_NO_LOCK_PREFIX();
1929
1930 IEM_MC_BEGIN(0, 0);
1931 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
1932 IEM_MC_REL_JMP_S32(i32Imm);
1933 } IEM_MC_ELSE() {
1934 IEM_MC_ADVANCE_RIP();
1935 } IEM_MC_ENDIF();
1936 IEM_MC_END();
1937 }
1938 return VINF_SUCCESS;
1939}
1940
1941
1942/** Opcode 0x0f 0x8b. */
1943FNIEMOP_DEF(iemOp_jnp_Jv)
1944{
1945 IEMOP_MNEMONIC("jo Jv");
1946 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1947 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1948 {
1949 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1950 IEMOP_HLP_NO_LOCK_PREFIX();
1951
1952 IEM_MC_BEGIN(0, 0);
1953 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
1954 IEM_MC_ADVANCE_RIP();
1955 } IEM_MC_ELSE() {
1956 IEM_MC_REL_JMP_S16(i16Imm);
1957 } IEM_MC_ENDIF();
1958 IEM_MC_END();
1959 }
1960 else
1961 {
1962 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1963 IEMOP_HLP_NO_LOCK_PREFIX();
1964
1965 IEM_MC_BEGIN(0, 0);
1966 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
1967 IEM_MC_ADVANCE_RIP();
1968 } IEM_MC_ELSE() {
1969 IEM_MC_REL_JMP_S32(i32Imm);
1970 } IEM_MC_ENDIF();
1971 IEM_MC_END();
1972 }
1973 return VINF_SUCCESS;
1974}
1975
1976
1977/** Opcode 0x0f 0x8c. */
1978FNIEMOP_DEF(iemOp_jl_Jv)
1979{
1980 IEMOP_MNEMONIC("jl/jnge Jv");
1981 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1982 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1983 {
1984 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1985 IEMOP_HLP_NO_LOCK_PREFIX();
1986
1987 IEM_MC_BEGIN(0, 0);
1988 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
1989 IEM_MC_REL_JMP_S16(i16Imm);
1990 } IEM_MC_ELSE() {
1991 IEM_MC_ADVANCE_RIP();
1992 } IEM_MC_ENDIF();
1993 IEM_MC_END();
1994 }
1995 else
1996 {
1997 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1998 IEMOP_HLP_NO_LOCK_PREFIX();
1999
2000 IEM_MC_BEGIN(0, 0);
2001 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
2002 IEM_MC_REL_JMP_S32(i32Imm);
2003 } IEM_MC_ELSE() {
2004 IEM_MC_ADVANCE_RIP();
2005 } IEM_MC_ENDIF();
2006 IEM_MC_END();
2007 }
2008 return VINF_SUCCESS;
2009}
2010
2011
2012/** Opcode 0x0f 0x8d. */
2013FNIEMOP_DEF(iemOp_jnl_Jv)
2014{
2015 IEMOP_MNEMONIC("jnl/jge Jv");
2016 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
2017 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
2018 {
2019 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
2020 IEMOP_HLP_NO_LOCK_PREFIX();
2021
2022 IEM_MC_BEGIN(0, 0);
2023 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
2024 IEM_MC_ADVANCE_RIP();
2025 } IEM_MC_ELSE() {
2026 IEM_MC_REL_JMP_S16(i16Imm);
2027 } IEM_MC_ENDIF();
2028 IEM_MC_END();
2029 }
2030 else
2031 {
2032 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
2033 IEMOP_HLP_NO_LOCK_PREFIX();
2034
2035 IEM_MC_BEGIN(0, 0);
2036 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
2037 IEM_MC_ADVANCE_RIP();
2038 } IEM_MC_ELSE() {
2039 IEM_MC_REL_JMP_S32(i32Imm);
2040 } IEM_MC_ENDIF();
2041 IEM_MC_END();
2042 }
2043 return VINF_SUCCESS;
2044}
2045
2046
2047/** Opcode 0x0f 0x8e. */
2048FNIEMOP_DEF(iemOp_jle_Jv)
2049{
2050 IEMOP_MNEMONIC("jle/jng Jv");
2051 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
2052 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
2053 {
2054 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
2055 IEMOP_HLP_NO_LOCK_PREFIX();
2056
2057 IEM_MC_BEGIN(0, 0);
2058 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
2059 IEM_MC_REL_JMP_S16(i16Imm);
2060 } IEM_MC_ELSE() {
2061 IEM_MC_ADVANCE_RIP();
2062 } IEM_MC_ENDIF();
2063 IEM_MC_END();
2064 }
2065 else
2066 {
2067 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
2068 IEMOP_HLP_NO_LOCK_PREFIX();
2069
2070 IEM_MC_BEGIN(0, 0);
2071 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
2072 IEM_MC_REL_JMP_S32(i32Imm);
2073 } IEM_MC_ELSE() {
2074 IEM_MC_ADVANCE_RIP();
2075 } IEM_MC_ENDIF();
2076 IEM_MC_END();
2077 }
2078 return VINF_SUCCESS;
2079}
2080
2081
2082/** Opcode 0x0f 0x8f. */
2083FNIEMOP_DEF(iemOp_jnle_Jv)
2084{
2085 IEMOP_MNEMONIC("jnle/jg Jv");
2086 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
2087 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
2088 {
2089 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
2090 IEMOP_HLP_NO_LOCK_PREFIX();
2091
2092 IEM_MC_BEGIN(0, 0);
2093 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
2094 IEM_MC_ADVANCE_RIP();
2095 } IEM_MC_ELSE() {
2096 IEM_MC_REL_JMP_S16(i16Imm);
2097 } IEM_MC_ENDIF();
2098 IEM_MC_END();
2099 }
2100 else
2101 {
2102 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
2103 IEMOP_HLP_NO_LOCK_PREFIX();
2104
2105 IEM_MC_BEGIN(0, 0);
2106 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
2107 IEM_MC_ADVANCE_RIP();
2108 } IEM_MC_ELSE() {
2109 IEM_MC_REL_JMP_S32(i32Imm);
2110 } IEM_MC_ENDIF();
2111 IEM_MC_END();
2112 }
2113 return VINF_SUCCESS;
2114}
2115
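/*
 * The sixteen SETcc handlers that follow all share one pattern: evaluate a
 * single EFLAGS condition and store a full byte of 1 or 0 to the register or
 * memory destination.  A minimal sketch of that semantic (illustration only,
 * not compiled; the iemDemo name is invented):
 */
#if 0 /* Illustration only -- not part of the build. */
# include <stdint.h>
static uint8_t iemDemoSetcc(uint32_t fEFlags, uint32_t fCondMask)
{
    /* SETcc always writes the whole destination byte, never just bit 0. */
    return (fEFlags & fCondMask) ? UINT8_C(1) : UINT8_C(0);
}
#endif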
2116
2117/** Opcode 0x0f 0x90. */
2118FNIEMOP_DEF(iemOp_seto_Eb)
2119{
2120 IEMOP_MNEMONIC("seto Eb");
2121 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2122 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2123
2124 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2125 * any way. AMD says it's "unused", whatever that means. We're
2126 * ignoring for now. */
2127 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2128 {
2129 /* register target */
2130 IEM_MC_BEGIN(0, 0);
2131 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
2132 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2133 } IEM_MC_ELSE() {
2134 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2135 } IEM_MC_ENDIF();
2136 IEM_MC_ADVANCE_RIP();
2137 IEM_MC_END();
2138 }
2139 else
2140 {
2141 /* memory target */
2142 IEM_MC_BEGIN(0, 1);
2143 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2144 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2145 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
2146 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2147 } IEM_MC_ELSE() {
2148 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2149 } IEM_MC_ENDIF();
2150 IEM_MC_ADVANCE_RIP();
2151 IEM_MC_END();
2152 }
2153 return VINF_SUCCESS;
2154}
2155
2156
2157/** Opcode 0x0f 0x91. */
2158FNIEMOP_DEF(iemOp_setno_Eb)
2159{
2160 IEMOP_MNEMONIC("setno Eb");
2161 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2162 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2163
2164 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2165 * any way. AMD says it's "unused", whatever that means. We're
2166 * ignoring for now. */
2167 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2168 {
2169 /* register target */
2170 IEM_MC_BEGIN(0, 0);
2171 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
2172 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2173 } IEM_MC_ELSE() {
2174 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2175 } IEM_MC_ENDIF();
2176 IEM_MC_ADVANCE_RIP();
2177 IEM_MC_END();
2178 }
2179 else
2180 {
2181 /* memory target */
2182 IEM_MC_BEGIN(0, 1);
2183 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2184 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2185 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
2186 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2187 } IEM_MC_ELSE() {
2188 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2189 } IEM_MC_ENDIF();
2190 IEM_MC_ADVANCE_RIP();
2191 IEM_MC_END();
2192 }
2193 return VINF_SUCCESS;
2194}
2195
2196
2197/** Opcode 0x0f 0x92. */
2198FNIEMOP_DEF(iemOp_setc_Eb)
2199{
2200 IEMOP_MNEMONIC("setc Eb");
2201 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2202 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2203
2204 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2205 * any way. AMD says it's "unused", whatever that means. We're
2206 * ignoring for now. */
2207 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2208 {
2209 /* register target */
2210 IEM_MC_BEGIN(0, 0);
2211 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
2212 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2213 } IEM_MC_ELSE() {
2214 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2215 } IEM_MC_ENDIF();
2216 IEM_MC_ADVANCE_RIP();
2217 IEM_MC_END();
2218 }
2219 else
2220 {
2221 /* memory target */
2222 IEM_MC_BEGIN(0, 1);
2223 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2224 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2225 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
2226 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2227 } IEM_MC_ELSE() {
2228 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2229 } IEM_MC_ENDIF();
2230 IEM_MC_ADVANCE_RIP();
2231 IEM_MC_END();
2232 }
2233 return VINF_SUCCESS;
2234}
2235
2236
2237/** Opcode 0x0f 0x93. */
2238FNIEMOP_DEF(iemOp_setnc_Eb)
2239{
2240 IEMOP_MNEMONIC("setnc Eb");
2241 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2242 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2243
2244 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2245 * any way. AMD says it's "unused", whatever that means. We're
2246 * ignoring for now. */
2247 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2248 {
2249 /* register target */
2250 IEM_MC_BEGIN(0, 0);
2251 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
2252 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2253 } IEM_MC_ELSE() {
2254 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2255 } IEM_MC_ENDIF();
2256 IEM_MC_ADVANCE_RIP();
2257 IEM_MC_END();
2258 }
2259 else
2260 {
2261 /* memory target */
2262 IEM_MC_BEGIN(0, 1);
2263 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2264 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2265 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
2266 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2267 } IEM_MC_ELSE() {
2268 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2269 } IEM_MC_ENDIF();
2270 IEM_MC_ADVANCE_RIP();
2271 IEM_MC_END();
2272 }
2273 return VINF_SUCCESS;
2274}
2275
2276
2277/** Opcode 0x0f 0x94. */
2278FNIEMOP_DEF(iemOp_sete_Eb)
2279{
2280 IEMOP_MNEMONIC("sete Eb");
2281 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2282 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2283
2284 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2285 * any way. AMD says it's "unused", whatever that means. We're
2286 * ignoring for now. */
2287 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2288 {
2289 /* register target */
2290 IEM_MC_BEGIN(0, 0);
2291 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
2292 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2293 } IEM_MC_ELSE() {
2294 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2295 } IEM_MC_ENDIF();
2296 IEM_MC_ADVANCE_RIP();
2297 IEM_MC_END();
2298 }
2299 else
2300 {
2301 /* memory target */
2302 IEM_MC_BEGIN(0, 1);
2303 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2304 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2305 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
2306 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2307 } IEM_MC_ELSE() {
2308 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2309 } IEM_MC_ENDIF();
2310 IEM_MC_ADVANCE_RIP();
2311 IEM_MC_END();
2312 }
2313 return VINF_SUCCESS;
2314}
2315
2316
2317/** Opcode 0x0f 0x95. */
2318FNIEMOP_DEF(iemOp_setne_Eb)
2319{
2320 IEMOP_MNEMONIC("setne Eb");
2321 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2322 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2323
2324 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2325 * any way. AMD says it's "unused", whatever that means. We're
2326 * ignoring for now. */
2327 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2328 {
2329 /* register target */
2330 IEM_MC_BEGIN(0, 0);
2331 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
2332 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2333 } IEM_MC_ELSE() {
2334 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2335 } IEM_MC_ENDIF();
2336 IEM_MC_ADVANCE_RIP();
2337 IEM_MC_END();
2338 }
2339 else
2340 {
2341 /* memory target */
2342 IEM_MC_BEGIN(0, 1);
2343 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2344 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2345 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
2346 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2347 } IEM_MC_ELSE() {
2348 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2349 } IEM_MC_ENDIF();
2350 IEM_MC_ADVANCE_RIP();
2351 IEM_MC_END();
2352 }
2353 return VINF_SUCCESS;
2354}
2355
2356
2357/** Opcode 0x0f 0x96. */
2358FNIEMOP_DEF(iemOp_setbe_Eb)
2359{
2360 IEMOP_MNEMONIC("setbe Eb");
2361 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2362 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2363
2364 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2365 * any way. AMD says it's "unused", whatever that means. We're
2366 * ignoring for now. */
2367 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2368 {
2369 /* register target */
2370 IEM_MC_BEGIN(0, 0);
2371 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
2372 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2373 } IEM_MC_ELSE() {
2374 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2375 } IEM_MC_ENDIF();
2376 IEM_MC_ADVANCE_RIP();
2377 IEM_MC_END();
2378 }
2379 else
2380 {
2381 /* memory target */
2382 IEM_MC_BEGIN(0, 1);
2383 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2384 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2385 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
2386 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2387 } IEM_MC_ELSE() {
2388 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2389 } IEM_MC_ENDIF();
2390 IEM_MC_ADVANCE_RIP();
2391 IEM_MC_END();
2392 }
2393 return VINF_SUCCESS;
2394}
2395
2396
2397/** Opcode 0x0f 0x97. */
2398FNIEMOP_DEF(iemOp_setnbe_Eb)
2399{
2400 IEMOP_MNEMONIC("setnbe Eb");
2401 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2402 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2403
2404 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2405 * any way. AMD says it's "unused", whatever that means. We're
2406 * ignoring for now. */
2407 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2408 {
2409 /* register target */
2410 IEM_MC_BEGIN(0, 0);
2411 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
2412 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2413 } IEM_MC_ELSE() {
2414 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2415 } IEM_MC_ENDIF();
2416 IEM_MC_ADVANCE_RIP();
2417 IEM_MC_END();
2418 }
2419 else
2420 {
2421 /* memory target */
2422 IEM_MC_BEGIN(0, 1);
2423 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2424 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2425 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
2426 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2427 } IEM_MC_ELSE() {
2428 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2429 } IEM_MC_ENDIF();
2430 IEM_MC_ADVANCE_RIP();
2431 IEM_MC_END();
2432 }
2433 return VINF_SUCCESS;
2434}
2435
2436
2437/** Opcode 0x0f 0x98. */
2438FNIEMOP_DEF(iemOp_sets_Eb)
2439{
2440 IEMOP_MNEMONIC("sets Eb");
2441 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2442 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2443
2444 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2445 * any way. AMD says it's "unused", whatever that means. We're
2446 * ignoring for now. */
2447 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2448 {
2449 /* register target */
2450 IEM_MC_BEGIN(0, 0);
2451 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
2452 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2453 } IEM_MC_ELSE() {
2454 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2455 } IEM_MC_ENDIF();
2456 IEM_MC_ADVANCE_RIP();
2457 IEM_MC_END();
2458 }
2459 else
2460 {
2461 /* memory target */
2462 IEM_MC_BEGIN(0, 1);
2463 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2464 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2465 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
2466 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2467 } IEM_MC_ELSE() {
2468 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2469 } IEM_MC_ENDIF();
2470 IEM_MC_ADVANCE_RIP();
2471 IEM_MC_END();
2472 }
2473 return VINF_SUCCESS;
2474}
2475
2476
2477/** Opcode 0x0f 0x99. */
2478FNIEMOP_DEF(iemOp_setns_Eb)
2479{
2480 IEMOP_MNEMONIC("setns Eb");
2481 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2482 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2483
2484 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2485 * any way. AMD says it's "unused", whatever that means. We're
2486 * ignoring for now. */
2487 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2488 {
2489 /* register target */
2490 IEM_MC_BEGIN(0, 0);
2491 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
2492 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2493 } IEM_MC_ELSE() {
2494 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2495 } IEM_MC_ENDIF();
2496 IEM_MC_ADVANCE_RIP();
2497 IEM_MC_END();
2498 }
2499 else
2500 {
2501 /* memory target */
2502 IEM_MC_BEGIN(0, 1);
2503 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2504 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2505 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
2506 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2507 } IEM_MC_ELSE() {
2508 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2509 } IEM_MC_ENDIF();
2510 IEM_MC_ADVANCE_RIP();
2511 IEM_MC_END();
2512 }
2513 return VINF_SUCCESS;
2514}
2515
2516
2517/** Opcode 0x0f 0x9a. */
2518FNIEMOP_DEF(iemOp_setp_Eb)
2519{
2520 IEMOP_MNEMONIC("setnp Eb");
2521 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2522 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2523
2524 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2525 * any way. AMD says it's "unused", whatever that means. We're
2526 * ignoring for now. */
2527 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2528 {
2529 /* register target */
2530 IEM_MC_BEGIN(0, 0);
2531 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2532 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2533 } IEM_MC_ELSE() {
2534 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2535 } IEM_MC_ENDIF();
2536 IEM_MC_ADVANCE_RIP();
2537 IEM_MC_END();
2538 }
2539 else
2540 {
2541 /* memory target */
2542 IEM_MC_BEGIN(0, 1);
2543 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2544 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2545 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2546 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2547 } IEM_MC_ELSE() {
2548 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2549 } IEM_MC_ENDIF();
2550 IEM_MC_ADVANCE_RIP();
2551 IEM_MC_END();
2552 }
2553 return VINF_SUCCESS;
2554}
2555
2556
2557/** Opcode 0x0f 0x9b. */
2558FNIEMOP_DEF(iemOp_setnp_Eb)
2559{
2560 IEMOP_MNEMONIC("setnp Eb");
2561 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2562 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2563
2564 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2565 * any way. AMD says it's "unused", whatever that means. We're
2566 * ignoring for now. */
2567 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2568 {
2569 /* register target */
2570 IEM_MC_BEGIN(0, 0);
2571 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2572 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2573 } IEM_MC_ELSE() {
2574 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2575 } IEM_MC_ENDIF();
2576 IEM_MC_ADVANCE_RIP();
2577 IEM_MC_END();
2578 }
2579 else
2580 {
2581 /* memory target */
2582 IEM_MC_BEGIN(0, 1);
2583 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2584 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2585 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2586 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2587 } IEM_MC_ELSE() {
2588 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2589 } IEM_MC_ENDIF();
2590 IEM_MC_ADVANCE_RIP();
2591 IEM_MC_END();
2592 }
2593 return VINF_SUCCESS;
2594}
2595
2596
2597/** Opcode 0x0f 0x9c. */
2598FNIEMOP_DEF(iemOp_setl_Eb)
2599{
2600 IEMOP_MNEMONIC("setl Eb");
2601 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2602 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2603
2604 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2605 * any way. AMD says it's "unused", whatever that means. We're
2606 * ignoring for now. */
2607 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2608 {
2609 /* register target */
2610 IEM_MC_BEGIN(0, 0);
2611 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
2612 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2613 } IEM_MC_ELSE() {
2614 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2615 } IEM_MC_ENDIF();
2616 IEM_MC_ADVANCE_RIP();
2617 IEM_MC_END();
2618 }
2619 else
2620 {
2621 /* memory target */
2622 IEM_MC_BEGIN(0, 1);
2623 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2624 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2625 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
2626 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2627 } IEM_MC_ELSE() {
2628 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2629 } IEM_MC_ENDIF();
2630 IEM_MC_ADVANCE_RIP();
2631 IEM_MC_END();
2632 }
2633 return VINF_SUCCESS;
2634}
2635
2636
2637/** Opcode 0x0f 0x9d. */
2638FNIEMOP_DEF(iemOp_setnl_Eb)
2639{
2640 IEMOP_MNEMONIC("setnl Eb");
2641 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2642 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2643
2644 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2645 * any way. AMD says it's "unused", whatever that means. We're
2646 * ignoring for now. */
2647 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2648 {
2649 /* register target */
2650 IEM_MC_BEGIN(0, 0);
2651 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
2652 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2653 } IEM_MC_ELSE() {
2654 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2655 } IEM_MC_ENDIF();
2656 IEM_MC_ADVANCE_RIP();
2657 IEM_MC_END();
2658 }
2659 else
2660 {
2661 /* memory target */
2662 IEM_MC_BEGIN(0, 1);
2663 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2664 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2665 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
2666 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2667 } IEM_MC_ELSE() {
2668 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2669 } IEM_MC_ENDIF();
2670 IEM_MC_ADVANCE_RIP();
2671 IEM_MC_END();
2672 }
2673 return VINF_SUCCESS;
2674}
2675
2676
2677/** Opcode 0x0f 0x9e. */
2678FNIEMOP_DEF(iemOp_setle_Eb)
2679{
2680 IEMOP_MNEMONIC("setle Eb");
2681 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2682 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2683
2684 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2685 * any way. AMD says it's "unused", whatever that means. We're
2686 * ignoring for now. */
2687 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2688 {
2689 /* register target */
2690 IEM_MC_BEGIN(0, 0);
2691 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
2692 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2693 } IEM_MC_ELSE() {
2694 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2695 } IEM_MC_ENDIF();
2696 IEM_MC_ADVANCE_RIP();
2697 IEM_MC_END();
2698 }
2699 else
2700 {
2701 /* memory target */
2702 IEM_MC_BEGIN(0, 1);
2703 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2704 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2705 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
2706 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2707 } IEM_MC_ELSE() {
2708 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2709 } IEM_MC_ENDIF();
2710 IEM_MC_ADVANCE_RIP();
2711 IEM_MC_END();
2712 }
2713 return VINF_SUCCESS;
2714}
2715
2716
2717/** Opcode 0x0f 0x9f. */
2718FNIEMOP_DEF(iemOp_setnle_Eb)
2719{
2720 IEMOP_MNEMONIC("setnle Eb");
2721 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2722 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2723
2724 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2725 * any way. AMD says it's "unused", whatever that means. We're
2726 * ignoring for now. */
2727 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2728 {
2729 /* register target */
2730 IEM_MC_BEGIN(0, 0);
2731 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
2732 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2733 } IEM_MC_ELSE() {
2734 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2735 } IEM_MC_ENDIF();
2736 IEM_MC_ADVANCE_RIP();
2737 IEM_MC_END();
2738 }
2739 else
2740 {
2741 /* memory target */
2742 IEM_MC_BEGIN(0, 1);
2743 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2744 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2745 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
2746 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2747 } IEM_MC_ELSE() {
2748 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2749 } IEM_MC_ENDIF();
2750 IEM_MC_ADVANCE_RIP();
2751 IEM_MC_END();
2752 }
2753 return VINF_SUCCESS;
2754}
2755
2756
2757/**
2758 * Common 'push segment-register' helper.
2759 */
2760FNIEMOP_DEF_1(iemOpCommonPushSReg, uint8_t, iReg)
2761{
2762 IEMOP_HLP_NO_LOCK_PREFIX();
2763 if (iReg < X86_SREG_FS)
2764 IEMOP_HLP_NO_64BIT();
2765 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
2766
2767 switch (pIemCpu->enmEffOpSize)
2768 {
2769 case IEMMODE_16BIT:
2770 IEM_MC_BEGIN(0, 1);
2771 IEM_MC_LOCAL(uint16_t, u16Value);
2772 IEM_MC_FETCH_SREG_U16(u16Value, iReg);
2773 IEM_MC_PUSH_U16(u16Value);
2774 IEM_MC_ADVANCE_RIP();
2775 IEM_MC_END();
2776 break;
2777
2778 case IEMMODE_32BIT:
2779 IEM_MC_BEGIN(0, 1);
2780 IEM_MC_LOCAL(uint32_t, u32Value);
2781 IEM_MC_FETCH_SREG_ZX_U32(u32Value, iReg);
2782 IEM_MC_PUSH_U32(u32Value);
2783 IEM_MC_ADVANCE_RIP();
2784 IEM_MC_END();
2785 break;
2786
2787 case IEMMODE_64BIT:
2788 IEM_MC_BEGIN(0, 1);
2789 IEM_MC_LOCAL(uint64_t, u64Value);
2790 IEM_MC_FETCH_SREG_ZX_U64(u64Value, iReg);
2791 IEM_MC_PUSH_U64(u64Value);
2792 IEM_MC_ADVANCE_RIP();
2793 IEM_MC_END();
2794 break;
2795 }
2796
2797 return VINF_SUCCESS;
2798}
2799
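/*
 * Note the zero-extending fetches above: the 16-bit selector occupies a full
 * 32-bit or 64-bit stack slot in the wider operand sizes.  A sketch of the
 * 64-bit case (illustration only, not compiled; assumes a flat, writable
 * stack mapping and an invented iemDemo name):
 */
#if 0 /* Illustration only -- not part of the build. */
# include <stdint.h>
# include <string.h>
static void iemDemoPushSReg64(uint8_t *pbStack, uint64_t *puRsp, uint16_t uSel)
{
    uint64_t const u64Value = uSel;     /* zero-extended; bits 63:16 are 0 */
    *puRsp -= 8;                        /* 64-bit operand size: a full qword slot */
    memcpy(pbStack + *puRsp, &u64Value, sizeof(u64Value));
}
#endif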
2800
2801/** Opcode 0x0f 0xa0. */
2802FNIEMOP_DEF(iemOp_push_fs)
2803{
2804 IEMOP_MNEMONIC("push fs");
2805 IEMOP_HLP_NO_LOCK_PREFIX();
2806 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_FS);
2807}
2808
2809
2810/** Opcode 0x0f 0xa1. */
2811FNIEMOP_DEF(iemOp_pop_fs)
2812{
2813 IEMOP_MNEMONIC("pop fs");
2814 IEMOP_HLP_NO_LOCK_PREFIX();
2815 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_FS, pIemCpu->enmEffOpSize);
2816}
2817
2818
2819/** Opcode 0x0f 0xa2. */
2820FNIEMOP_DEF(iemOp_cpuid)
2821{
2822 IEMOP_MNEMONIC("cpuid");
2823 IEMOP_HLP_NO_LOCK_PREFIX();
2824 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cpuid);
2825}
2826
2827
2828/**
2829 * Common worker for iemOp_bt_Ev_Gv, iemOp_btc_Ev_Gv, iemOp_btr_Ev_Gv and
2830 * iemOp_bts_Ev_Gv.
2831 */
2832FNIEMOP_DEF_1(iemOpCommonBit_Ev_Gv, PCIEMOPBINSIZES, pImpl)
2833{
2834 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2835 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
2836
2837 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2838 {
2839 /* register destination. */
2840 IEMOP_HLP_NO_LOCK_PREFIX();
2841 switch (pIemCpu->enmEffOpSize)
2842 {
2843 case IEMMODE_16BIT:
2844 IEM_MC_BEGIN(3, 0);
2845 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
2846 IEM_MC_ARG(uint16_t, u16Src, 1);
2847 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2848
2849 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2850 IEM_MC_AND_LOCAL_U16(u16Src, 0xf);
2851 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2852 IEM_MC_REF_EFLAGS(pEFlags);
2853 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
2854
2855 IEM_MC_ADVANCE_RIP();
2856 IEM_MC_END();
2857 return VINF_SUCCESS;
2858
2859 case IEMMODE_32BIT:
2860 IEM_MC_BEGIN(3, 0);
2861 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
2862 IEM_MC_ARG(uint32_t, u32Src, 1);
2863 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2864
2865 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2866 IEM_MC_AND_LOCAL_U32(u32Src, 0x1f);
2867 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2868 IEM_MC_REF_EFLAGS(pEFlags);
2869 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
2870
2871 IEM_MC_ADVANCE_RIP();
2872 IEM_MC_END();
2873 return VINF_SUCCESS;
2874
2875 case IEMMODE_64BIT:
2876 IEM_MC_BEGIN(3, 0);
2877 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
2878 IEM_MC_ARG(uint64_t, u64Src, 1);
2879 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2880
2881 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2882 IEM_MC_AND_LOCAL_U64(u64Src, 0x3f);
2883 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2884 IEM_MC_REF_EFLAGS(pEFlags);
2885 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
2886
2887 IEM_MC_ADVANCE_RIP();
2888 IEM_MC_END();
2889 return VINF_SUCCESS;
2890
2891 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2892 }
2893 }
2894 else
2895 {
2896 /* memory destination. */
2897
2898 uint32_t fAccess;
2899 if (pImpl->pfnLockedU16)
2900 fAccess = IEM_ACCESS_DATA_RW;
2901 else /* BT */
2902 {
2903 IEMOP_HLP_NO_LOCK_PREFIX();
2904 fAccess = IEM_ACCESS_DATA_R;
2905 }
2906
2907 /** @todo test negative bit offsets! */
2908 switch (pIemCpu->enmEffOpSize)
2909 {
2910 case IEMMODE_16BIT:
2911 IEM_MC_BEGIN(3, 2);
2912 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
2913 IEM_MC_ARG(uint16_t, u16Src, 1);
2914 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2915 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2916 IEM_MC_LOCAL(int16_t, i16AddrAdj);
2917
2918 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2919 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2920 IEM_MC_ASSIGN(i16AddrAdj, u16Src);
2921 IEM_MC_AND_ARG_U16(u16Src, 0x0f);
2922 IEM_MC_SAR_LOCAL_S16(i16AddrAdj, 4);
2923 IEM_MC_SHL_LOCAL_S16(i16AddrAdj, 1);
2924 IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(GCPtrEffDst, i16AddrAdj);
2925 IEM_MC_FETCH_EFLAGS(EFlags);
2926
2927 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2928 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2929 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
2930 else
2931 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
2932 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
2933
2934 IEM_MC_COMMIT_EFLAGS(EFlags);
2935 IEM_MC_ADVANCE_RIP();
2936 IEM_MC_END();
2937 return VINF_SUCCESS;
2938
2939 case IEMMODE_32BIT:
2940 IEM_MC_BEGIN(3, 2);
2941 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
2942 IEM_MC_ARG(uint32_t, u32Src, 1);
2943 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2944 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2945 IEM_MC_LOCAL(int32_t, i32AddrAdj);
2946
2947 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2948 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2949 IEM_MC_ASSIGN(i32AddrAdj, u32Src);
2950 IEM_MC_AND_ARG_U32(u32Src, 0x1f);
2951 IEM_MC_SAR_LOCAL_S32(i32AddrAdj, 5);
2952 IEM_MC_SHL_LOCAL_S32(i32AddrAdj, 2);
2953 IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(GCPtrEffDst, i32AddrAdj);
2954 IEM_MC_FETCH_EFLAGS(EFlags);
2955
2956 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2957 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2958 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
2959 else
2960 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
2961 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
2962
2963 IEM_MC_COMMIT_EFLAGS(EFlags);
2964 IEM_MC_ADVANCE_RIP();
2965 IEM_MC_END();
2966 return VINF_SUCCESS;
2967
2968 case IEMMODE_64BIT:
2969 IEM_MC_BEGIN(3, 2);
2970 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
2971 IEM_MC_ARG(uint64_t, u64Src, 1);
2972 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2973 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2974 IEM_MC_LOCAL(int64_t, i64AddrAdj);
2975
2976 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2977 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2978 IEM_MC_ASSIGN(i64AddrAdj, u64Src);
2979 IEM_MC_AND_ARG_U64(u64Src, 0x3f);
2980 IEM_MC_SAR_LOCAL_S64(i64AddrAdj, 6);
2981 IEM_MC_SHL_LOCAL_S64(i64AddrAdj, 3);
2982 IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(GCPtrEffDst, i64AddrAdj);
2983 IEM_MC_FETCH_EFLAGS(EFlags);
2984
2985 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2986 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2987 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
2988 else
2989 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
2990 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
2991
2992 IEM_MC_COMMIT_EFLAGS(EFlags);
2993 IEM_MC_ADVANCE_RIP();
2994 IEM_MC_END();
2995 return VINF_SUCCESS;
2996
2997 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2998 }
2999 }
3000}
3001
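/*
 * For memory operands the worker above splits the signed bit offset in Gv
 * into a signed element displacement (arithmetic shift right by log2 of the
 * operand width) plus a bit number inside that element (the masked low
 * bits); this is how negative bit offsets address memory below the effective
 * address.  A standalone sketch of the 32-bit decomposition (illustration
 * only, not compiled; assumes a flat byte-addressable buffer and an invented
 * iemDemo name):
 */
#if 0 /* Illustration only -- not part of the build. */
# include <stdint.h>
# include <string.h>
static int iemDemoBitTest32(uint8_t const *pbBase, int32_t iBitOffset)
{
    int32_t const  iDWord = iBitOffset >> 5;            /* arithmetic shift keeps the sign */
    uint32_t const fMask  = UINT32_C(1) << (iBitOffset & 0x1f);
    uint32_t       u32;
    memcpy(&u32, pbBase + (intptr_t)iDWord * 4, sizeof(u32));
    return (u32 & fMask) != 0;                          /* the value BT would put in CF */
}
#endif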
3002
3003/** Opcode 0x0f 0xa3. */
3004FNIEMOP_DEF(iemOp_bt_Ev_Gv)
3005{
3006 IEMOP_MNEMONIC("bt Gv,Gv");
3007 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bt);
3008}
3009
3010
3011/**
3012 * Common worker for iemOp_shrd_Ev_Gv_Ib and iemOp_shld_Ev_Gv_Ib.
3013 */
3014FNIEMOP_DEF_1(iemOpCommonShldShrd_Ib, PCIEMOPSHIFTDBLSIZES, pImpl)
3015{
3016 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3017 IEMOP_HLP_NO_LOCK_PREFIX();
3018 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);
3019
3020 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3021 {
3022 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
3023 IEMOP_HLP_NO_LOCK_PREFIX();
3024
3025 switch (pIemCpu->enmEffOpSize)
3026 {
3027 case IEMMODE_16BIT:
3028 IEM_MC_BEGIN(4, 0);
3029 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3030 IEM_MC_ARG(uint16_t, u16Src, 1);
3031 IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
3032 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3033
3034 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3035 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3036 IEM_MC_REF_EFLAGS(pEFlags);
3037 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
3038
3039 IEM_MC_ADVANCE_RIP();
3040 IEM_MC_END();
3041 return VINF_SUCCESS;
3042
3043 case IEMMODE_32BIT:
3044 IEM_MC_BEGIN(4, 0);
3045 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3046 IEM_MC_ARG(uint32_t, u32Src, 1);
3047 IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
3048 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3049
3050 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3051 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3052 IEM_MC_REF_EFLAGS(pEFlags);
3053 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
3054
3055 IEM_MC_ADVANCE_RIP();
3056 IEM_MC_END();
3057 return VINF_SUCCESS;
3058
3059 case IEMMODE_64BIT:
3060 IEM_MC_BEGIN(4, 0);
3061 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3062 IEM_MC_ARG(uint64_t, u64Src, 1);
3063 IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
3064 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3065
3066 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3067 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3068 IEM_MC_REF_EFLAGS(pEFlags);
3069 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
3070
3071 IEM_MC_ADVANCE_RIP();
3072 IEM_MC_END();
3073 return VINF_SUCCESS;
3074
3075 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3076 }
3077 }
3078 else
3079 {
3080 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
3081
3082 switch (pIemCpu->enmEffOpSize)
3083 {
3084 case IEMMODE_16BIT:
3085 IEM_MC_BEGIN(4, 2);
3086 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3087 IEM_MC_ARG(uint16_t, u16Src, 1);
3088 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3089 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3090 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3091
3092 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3093 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
3094 IEM_MC_ASSIGN(cShiftArg, cShift);
3095 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3096 IEM_MC_FETCH_EFLAGS(EFlags);
3097 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3098 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
3099
3100 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
3101 IEM_MC_COMMIT_EFLAGS(EFlags);
3102 IEM_MC_ADVANCE_RIP();
3103 IEM_MC_END();
3104 return VINF_SUCCESS;
3105
3106 case IEMMODE_32BIT:
3107 IEM_MC_BEGIN(4, 2);
3108 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3109 IEM_MC_ARG(uint32_t, u32Src, 1);
3110 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3111 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3112 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3113
3114 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3115 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
3116 IEM_MC_ASSIGN(cShiftArg, cShift);
3117 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3118 IEM_MC_FETCH_EFLAGS(EFlags);
3119 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3120 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
3121
3122 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
3123 IEM_MC_COMMIT_EFLAGS(EFlags);
3124 IEM_MC_ADVANCE_RIP();
3125 IEM_MC_END();
3126 return VINF_SUCCESS;
3127
3128 case IEMMODE_64BIT:
3129 IEM_MC_BEGIN(4, 2);
3130 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3131 IEM_MC_ARG(uint64_t, u64Src, 1);
3132 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3133 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3134 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3135
3136 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3137 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
3138 IEM_MC_ASSIGN(cShiftArg, cShift);
3139 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3140 IEM_MC_FETCH_EFLAGS(EFlags);
3141 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3142 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
3143
3144 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
3145 IEM_MC_COMMIT_EFLAGS(EFlags);
3146 IEM_MC_ADVANCE_RIP();
3147 IEM_MC_END();
3148 return VINF_SUCCESS;
3149
3150 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3151 }
3152 }
3153}
3154
3155
3156/**
3157 * Common worker for iemOp_shrd_Ev_Gv_CL and iemOp_shld_Ev_Gv_CL.
3158 */
3159FNIEMOP_DEF_1(iemOpCommonShldShrd_CL, PCIEMOPSHIFTDBLSIZES, pImpl)
3160{
3161 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3162 IEMOP_HLP_NO_LOCK_PREFIX();
3163 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);
3164
3165 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3166 {
3167 IEMOP_HLP_NO_LOCK_PREFIX();
3168
3169 switch (pIemCpu->enmEffOpSize)
3170 {
3171 case IEMMODE_16BIT:
3172 IEM_MC_BEGIN(4, 0);
3173 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3174 IEM_MC_ARG(uint16_t, u16Src, 1);
3175 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3176 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3177
3178 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3179 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3180 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3181 IEM_MC_REF_EFLAGS(pEFlags);
3182 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
3183
3184 IEM_MC_ADVANCE_RIP();
3185 IEM_MC_END();
3186 return VINF_SUCCESS;
3187
3188 case IEMMODE_32BIT:
3189 IEM_MC_BEGIN(4, 0);
3190 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3191 IEM_MC_ARG(uint32_t, u32Src, 1);
3192 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3193 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3194
3195 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3196 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3197 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3198 IEM_MC_REF_EFLAGS(pEFlags);
3199 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
3200
3201 IEM_MC_ADVANCE_RIP();
3202 IEM_MC_END();
3203 return VINF_SUCCESS;
3204
3205 case IEMMODE_64BIT:
3206 IEM_MC_BEGIN(4, 0);
3207 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3208 IEM_MC_ARG(uint64_t, u64Src, 1);
3209 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3210 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3211
3212 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3213 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3214 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3215 IEM_MC_REF_EFLAGS(pEFlags);
3216 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
3217
3218 IEM_MC_ADVANCE_RIP();
3219 IEM_MC_END();
3220 return VINF_SUCCESS;
3221
3222 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3223 }
3224 }
3225 else
3226 {
3227 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
3228
3229 switch (pIemCpu->enmEffOpSize)
3230 {
3231 case IEMMODE_16BIT:
3232 IEM_MC_BEGIN(4, 2);
3233 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3234 IEM_MC_ARG(uint16_t, u16Src, 1);
3235 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3236 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3237 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3238
3239 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3240 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3241 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3242 IEM_MC_FETCH_EFLAGS(EFlags);
3243 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3244 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
3245
3246 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
3247 IEM_MC_COMMIT_EFLAGS(EFlags);
3248 IEM_MC_ADVANCE_RIP();
3249 IEM_MC_END();
3250 return VINF_SUCCESS;
3251
3252 case IEMMODE_32BIT:
3253 IEM_MC_BEGIN(4, 2);
3254 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3255 IEM_MC_ARG(uint32_t, u32Src, 1);
3256 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3257 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3258 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3259
3260 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3261 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3262 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3263 IEM_MC_FETCH_EFLAGS(EFlags);
3264 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3265 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
3266
3267 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
3268 IEM_MC_COMMIT_EFLAGS(EFlags);
3269 IEM_MC_ADVANCE_RIP();
3270 IEM_MC_END();
3271 return VINF_SUCCESS;
3272
3273 case IEMMODE_64BIT:
3274 IEM_MC_BEGIN(4, 2);
3275 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3276 IEM_MC_ARG(uint64_t, u64Src, 1);
3277 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3278 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3279 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3280
3281 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3282 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3283 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3284 IEM_MC_FETCH_EFLAGS(EFlags);
3285 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3286 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
3287
3288 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
3289 IEM_MC_COMMIT_EFLAGS(EFlags);
3290 IEM_MC_ADVANCE_RIP();
3291 IEM_MC_END();
3292 return VINF_SUCCESS;
3293
3294 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3295 }
3296 }
3297}
3298
3299
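/*
 * Both shift-double workers above defer the arithmetic to the assembly
 * helpers.  A plain-C sketch of what SHLD computes for a 32-bit destination
 * (illustration only, not compiled; the iemDemo name is invented; the count
 * is masked modulo 32 as for the 16-bit and 32-bit operand sizes):
 */
#if 0 /* Illustration only -- not part of the build. */
# include <stdint.h>
static uint32_t iemDemoShld32(uint32_t uDst, uint32_t uSrc, uint8_t cShift)
{
    cShift &= 31;                   /* the count is taken modulo 32 */
    if (!cShift)
        return uDst;                /* nothing moves for a zero count */
    /* The high bits of the source slide in behind the destination bits. */
    return (uDst << cShift) | (uSrc >> (32 - cShift));
}
#endif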
3300
3301/** Opcode 0x0f 0xa4. */
3302FNIEMOP_DEF(iemOp_shld_Ev_Gv_Ib)
3303{
3304 IEMOP_MNEMONIC("shld Ev,Gv,Ib");
3305 return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shld);
3306}
3307
3308
3309/** Opcode 0x0f 0xa7. */
3310FNIEMOP_DEF(iemOp_shld_Ev_Gv_CL)
3311{
3312 IEMOP_MNEMONIC("shld Ev,Gv,CL");
3313 return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shld);
3314}
3315
3316
3317/** Opcode 0x0f 0xa8. */
3318FNIEMOP_DEF(iemOp_push_gs)
3319{
3320 IEMOP_MNEMONIC("push gs");
3321 IEMOP_HLP_NO_LOCK_PREFIX();
3322 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_GS);
3323}
3324
3325
3326/** Opcode 0x0f 0xa9. */
3327FNIEMOP_DEF(iemOp_pop_gs)
3328{
3329 IEMOP_MNEMONIC("pop gs");
3330 IEMOP_HLP_NO_LOCK_PREFIX();
3331 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_GS, pIemCpu->enmEffOpSize);
3332}
3333
3334
3335/** Opcode 0x0f 0xaa. */
3336FNIEMOP_STUB(iemOp_rsm);
3337
3338
3339/** Opcode 0x0f 0xab. */
3340FNIEMOP_DEF(iemOp_bts_Ev_Gv)
3341{
3342 IEMOP_MNEMONIC("bts Ev,Gv");
3343 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bts);
3344}
3345
3346
3347/** Opcode 0x0f 0xac. */
3348FNIEMOP_DEF(iemOp_shrd_Ev_Gv_Ib)
3349{
3350 IEMOP_MNEMONIC("shrd Ev,Gv,Ib");
3351 return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shrd);
3352}
3353
3354
3355/** Opcode 0x0f 0xad. */
3356FNIEMOP_DEF(iemOp_shrd_Ev_Gv_CL)
3357{
3358 IEMOP_MNEMONIC("shrd Ev,Gv,CL");
3359 return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shrd);
3360}
3361
3362
3363/** Opcode 0x0f 0xae mem/0. */
3364FNIEMOP_DEF_1(iemOp_Grp15_fxsave, uint8_t, bRm)
3365{
3366 IEMOP_MNEMONIC("fxsave m512");
3367 IEMOP_HLP_NO_LOCK_PREFIX();
3368 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_FXSR))
3369 return IEMOP_RAISE_INVALID_OPCODE();
3370
3371 IEM_MC_BEGIN(3, 1);
3372 IEM_MC_ARG_CONST(uint8_t, iEffSeg,/*=*/pIemCpu->iEffSeg, 0);
3373 IEM_MC_ARG(RTGCPTR, GCPtrEff, 1);
3374 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 2);
3375 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
3376 IEM_MC_CALL_CIMPL_3(iemCImpl_fxsave, iEffSeg, GCPtrEff, enmEffOpSize);
3377 IEM_MC_END();
3378 return VINF_SUCCESS;
3379}
3380
3381
3382/** Opcode 0x0f 0xae mem/1. */
3383FNIEMOP_DEF_1(iemOp_Grp15_fxrstor, uint8_t, bRm)
3384{
3385 IEMOP_MNEMONIC("fxrstor m512");
3386 IEMOP_HLP_NO_LOCK_PREFIX();
3387 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_FXSR))
3388 return IEMOP_RAISE_INVALID_OPCODE();
3389
3390 IEM_MC_BEGIN(3, 1);
3391 IEM_MC_ARG_CONST(uint8_t, iEffSeg,/*=*/pIemCpu->iEffSeg, 0);
3392 IEM_MC_ARG(RTGCPTR, GCPtrEff, 1);
3393 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 2);
3394 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
3395 IEM_MC_CALL_CIMPL_3(iemCImpl_fxrstor, iEffSeg, GCPtrEff, enmEffOpSize);
3396 IEM_MC_END();
3397 return VINF_SUCCESS;
3398}
3399
3400
3401/** Opcode 0x0f 0xae mem/2. */
3402FNIEMOP_STUB_1(iemOp_Grp15_ldmxcsr, uint8_t, bRm);
3403
3404/** Opcode 0x0f 0xae mem/3. */
3405FNIEMOP_STUB_1(iemOp_Grp15_stmxcsr, uint8_t, bRm);
3406
3407/** Opcode 0x0f 0xae mem/4. */
3408FNIEMOP_STUB_1(iemOp_Grp15_xsave, uint8_t, bRm);
3409
3410/** Opcode 0x0f 0xae mem/5. */
3411FNIEMOP_STUB_1(iemOp_Grp15_xrstor, uint8_t, bRm);
3412
3413/** Opcode 0x0f 0xae mem/6. */
3414FNIEMOP_STUB_1(iemOp_Grp15_xsaveopt, uint8_t, bRm);
3415
3416/** Opcode 0x0f 0xae mem/7. */
3417FNIEMOP_STUB_1(iemOp_Grp15_clflush, uint8_t, bRm);
3418
3419/** Opcode 0x0f 0xae 11b/5. */
3420FNIEMOP_STUB_1(iemOp_Grp15_lfence, uint8_t, bRm);
3421
3422/** Opcode 0x0f 0xae 11b/6. */
3423FNIEMOP_STUB_1(iemOp_Grp15_mfence, uint8_t, bRm);
3424
3425/** Opcode 0x0f 0xae 11b/7. */
3426FNIEMOP_STUB_1(iemOp_Grp15_sfence, uint8_t, bRm);
3427
3428/** Opcode 0xf3 0x0f 0xae 11b/0. */
3429FNIEMOP_STUB_1(iemOp_Grp15_rdfsbase, uint8_t, bRm);
3430
3431/** Opcode 0xf3 0x0f 0xae 11b/1. */
3432FNIEMOP_STUB_1(iemOp_Grp15_rdgsbase, uint8_t, bRm);
3433
3434/** Opcode 0xf3 0x0f 0xae 11b/2. */
3435FNIEMOP_STUB_1(iemOp_Grp15_wrfsbase, uint8_t, bRm);
3436
3437/** Opcode 0xf3 0x0f 0xae 11b/3. */
3438FNIEMOP_STUB_1(iemOp_Grp15_wrgsbase, uint8_t, bRm);
3439
3440
3441/** Opcode 0x0f 0xae. */
3442FNIEMOP_DEF(iemOp_Grp15)
3443{
3444 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3445 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
3446 {
3447 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
3448 {
3449 case 0: return FNIEMOP_CALL_1(iemOp_Grp15_fxsave, bRm);
3450 case 1: return FNIEMOP_CALL_1(iemOp_Grp15_fxrstor, bRm);
3451 case 2: return FNIEMOP_CALL_1(iemOp_Grp15_ldmxcsr, bRm);
3452 case 3: return FNIEMOP_CALL_1(iemOp_Grp15_stmxcsr, bRm);
3453 case 4: return FNIEMOP_CALL_1(iemOp_Grp15_xsave, bRm);
3454 case 5: return FNIEMOP_CALL_1(iemOp_Grp15_xrstor, bRm);
3455 case 6: return FNIEMOP_CALL_1(iemOp_Grp15_xsaveopt,bRm);
3456 case 7: return FNIEMOP_CALL_1(iemOp_Grp15_clflush, bRm);
3457 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3458 }
3459 }
3460 else
3461 {
3462 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_LOCK))
3463 {
3464 case 0:
3465 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
3466 {
3467 case 0: return IEMOP_RAISE_INVALID_OPCODE();
3468 case 1: return IEMOP_RAISE_INVALID_OPCODE();
3469 case 2: return IEMOP_RAISE_INVALID_OPCODE();
3470 case 3: return IEMOP_RAISE_INVALID_OPCODE();
3471 case 4: return IEMOP_RAISE_INVALID_OPCODE();
3472 case 5: return FNIEMOP_CALL_1(iemOp_Grp15_lfence, bRm);
3473 case 6: return FNIEMOP_CALL_1(iemOp_Grp15_mfence, bRm);
3474 case 7: return FNIEMOP_CALL_1(iemOp_Grp15_sfence, bRm);
3475 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3476 }
3477 break;
3478
3479 case IEM_OP_PRF_REPZ:
3480 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
3481 {
3482 case 0: return FNIEMOP_CALL_1(iemOp_Grp15_rdfsbase, bRm);
3483 case 1: return FNIEMOP_CALL_1(iemOp_Grp15_rdgsbase, bRm);
3484 case 2: return FNIEMOP_CALL_1(iemOp_Grp15_wrfsbase, bRm);
3485 case 3: return FNIEMOP_CALL_1(iemOp_Grp15_wrgsbase, bRm);
3486 case 4: return IEMOP_RAISE_INVALID_OPCODE();
3487 case 5: return IEMOP_RAISE_INVALID_OPCODE();
3488 case 6: return IEMOP_RAISE_INVALID_OPCODE();
3489 case 7: return IEMOP_RAISE_INVALID_OPCODE();
3490 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3491 }
3492 break;
3493
3494 default:
3495 return IEMOP_RAISE_INVALID_OPCODE();
3496 }
3497 }
3498}
3499
3500
3501/** Opcode 0x0f 0xaf. */
3502FNIEMOP_DEF(iemOp_imul_Gv_Ev)
3503{
3504 IEMOP_MNEMONIC("imul Gv,Ev");
3505 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
3506 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_imul_two);
3507}
3508
3509
3510/** Opcode 0x0f 0xb0. */
3511FNIEMOP_STUB(iemOp_cmpxchg_Eb_Gb);
3512/** Opcode 0x0f 0xb1. */
3513FNIEMOP_STUB(iemOp_cmpxchg_Ev_Gv);
3514
3515
3516FNIEMOP_DEF_1(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg)
3517{
3518 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3519 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
3520
3521 /* The source cannot be a register. */
3522 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3523 return IEMOP_RAISE_INVALID_OPCODE();
3524 uint8_t const iGReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
3525
3526 switch (pIemCpu->enmEffOpSize)
3527 {
3528 case IEMMODE_16BIT:
3529 IEM_MC_BEGIN(5, 1);
3530 IEM_MC_ARG(uint16_t, uSel, 0);
3531 IEM_MC_ARG(uint16_t, offSeg, 1);
3532 IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
3533 IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
3534 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
3535 IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
3536 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
3537 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEff);
3538 IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 2);
3539 IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
3540 IEM_MC_END();
3541 return VINF_SUCCESS;
3542
3543 case IEMMODE_32BIT:
3544 IEM_MC_BEGIN(5, 1);
3545 IEM_MC_ARG(uint16_t, uSel, 0);
3546 IEM_MC_ARG(uint32_t, offSeg, 1);
3547 IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
3548 IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
3549 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
3550 IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
3551 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
3552 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEff);
3553 IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 4);
3554 IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
3555 IEM_MC_END();
3556 return VINF_SUCCESS;
3557
3558 case IEMMODE_64BIT:
3559 IEM_MC_BEGIN(5, 1);
3560 IEM_MC_ARG(uint16_t, uSel, 0);
3561 IEM_MC_ARG(uint64_t, offSeg, 1);
3562 IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
3563 IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
3564 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
3565 IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
3566 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
3567 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff);
3568 IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 8);
3569 IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
3570 IEM_MC_END();
3571 return VINF_SUCCESS;
3572
3573 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3574 }
3575}
3576
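/*
 * The far pointer operand fetched by the common worker above is laid out as
 * the offset first with the selector word immediately after it, so the
 * selector displacement (2, 4 or 8) matches the operand size.  A sketch of
 * the 32-bit layout (illustration only, not compiled; the iemDemo name is
 * invented):
 */
#if 0 /* Illustration only -- not part of the build. */
# include <stdint.h>
# include <string.h>
static void iemDemoReadFarPtr32(uint8_t const *pbMem, uint32_t *poffSeg, uint16_t *puSel)
{
    memcpy(poffSeg, pbMem,     sizeof(*poffSeg));   /* 32-bit offset at +0 */
    memcpy(puSel,   pbMem + 4, sizeof(*puSel));     /* selector word at +4 */
}
#endif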
3577
3578/** Opcode 0x0f 0xb2. */
3579FNIEMOP_DEF(iemOp_lss_Gv_Mp)
3580{
3581 IEMOP_MNEMONIC("lss Gv,Mp");
3582 return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_SS);
3583}
3584
3585
3586/** Opcode 0x0f 0xb3. */
3587FNIEMOP_DEF(iemOp_btr_Ev_Gv)
3588{
3589 IEMOP_MNEMONIC("btr Ev,Gv");
3590 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btr);
3591}
3592
3593
3594/** Opcode 0x0f 0xb4. */
3595FNIEMOP_DEF(iemOp_lfs_Gv_Mp)
3596{
3597 IEMOP_MNEMONIC("lfs Gv,Mp");
3598 return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_FS);
3599}
3600
3601
3602/** Opcode 0x0f 0xb5. */
3603FNIEMOP_DEF(iemOp_lgs_Gv_Mp)
3604{
3605 IEMOP_MNEMONIC("lgs Gv,Mp");
3606 return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_GS);
3607}
3608
3609
3610/** Opcode 0x0f 0xb6. */
3611FNIEMOP_DEF(iemOp_movzx_Gv_Eb)
3612{
3613 IEMOP_MNEMONIC("movzx Gv,Eb");
3614
3615 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3616 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
3617
3618 /*
3619 * If rm is denoting a register, no more instruction bytes.
3620 */
3621 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3622 {
3623 switch (pIemCpu->enmEffOpSize)
3624 {
3625 case IEMMODE_16BIT:
3626 IEM_MC_BEGIN(0, 1);
3627 IEM_MC_LOCAL(uint16_t, u16Value);
3628 IEM_MC_FETCH_GREG_U8_ZX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3629 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
3630 IEM_MC_ADVANCE_RIP();
3631 IEM_MC_END();
3632 return VINF_SUCCESS;
3633
3634 case IEMMODE_32BIT:
3635 IEM_MC_BEGIN(0, 1);
3636 IEM_MC_LOCAL(uint32_t, u32Value);
3637 IEM_MC_FETCH_GREG_U8_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3638 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3639 IEM_MC_ADVANCE_RIP();
3640 IEM_MC_END();
3641 return VINF_SUCCESS;
3642
3643 case IEMMODE_64BIT:
3644 IEM_MC_BEGIN(0, 1);
3645 IEM_MC_LOCAL(uint64_t, u64Value);
3646 IEM_MC_FETCH_GREG_U8_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3647 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3648 IEM_MC_ADVANCE_RIP();
3649 IEM_MC_END();
3650 return VINF_SUCCESS;
3651
3652 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3653 }
3654 }
3655 else
3656 {
3657 /*
3658 * We're loading a register from memory.
3659 */
3660 switch (pIemCpu->enmEffOpSize)
3661 {
3662 case IEMMODE_16BIT:
3663 IEM_MC_BEGIN(0, 2);
3664 IEM_MC_LOCAL(uint16_t, u16Value);
3665 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3666 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3667 IEM_MC_FETCH_MEM_U8_ZX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
3668 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
3669 IEM_MC_ADVANCE_RIP();
3670 IEM_MC_END();
3671 return VINF_SUCCESS;
3672
3673 case IEMMODE_32BIT:
3674 IEM_MC_BEGIN(0, 2);
3675 IEM_MC_LOCAL(uint32_t, u32Value);
3676 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3677 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3678 IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
3679 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3680 IEM_MC_ADVANCE_RIP();
3681 IEM_MC_END();
3682 return VINF_SUCCESS;
3683
3684 case IEMMODE_64BIT:
3685 IEM_MC_BEGIN(0, 2);
3686 IEM_MC_LOCAL(uint64_t, u64Value);
3687 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3688 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3689 IEM_MC_FETCH_MEM_U8_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
3690 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3691 IEM_MC_ADVANCE_RIP();
3692 IEM_MC_END();
3693 return VINF_SUCCESS;
3694
3695 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3696 }
3697 }
3698}
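

/*
 * For reference: movzx zero-extends, so with BL=0x80 'movzx eax, bl' yields
 * EAX=0x00000080, whereas the movsx decoders further down sign-extend the
 * same source to 0xFFFFFF80.
 */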
3699
3700
3701/** Opcode 0x0f 0xb7. */
3702FNIEMOP_DEF(iemOp_movzx_Gv_Ew)
3703{
3704 IEMOP_MNEMONIC("movzx Gv,Ew");
3705
3706 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3707 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
3708
3709 /** @todo Not entirely sure how the operand size prefix is handled here,
3710 * assuming that it will be ignored. Would be nice to have a few
3711     * tests for this. */
3712 /*
3713 * If rm is denoting a register, no more instruction bytes.
3714 */
3715 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3716 {
3717 if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
3718 {
3719 IEM_MC_BEGIN(0, 1);
3720 IEM_MC_LOCAL(uint32_t, u32Value);
3721 IEM_MC_FETCH_GREG_U16_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3722 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3723 IEM_MC_ADVANCE_RIP();
3724 IEM_MC_END();
3725 }
3726 else
3727 {
3728 IEM_MC_BEGIN(0, 1);
3729 IEM_MC_LOCAL(uint64_t, u64Value);
3730 IEM_MC_FETCH_GREG_U16_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3731 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3732 IEM_MC_ADVANCE_RIP();
3733 IEM_MC_END();
3734 }
3735 }
3736 else
3737 {
3738 /*
3739 * We're loading a register from memory.
3740 */
3741 if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
3742 {
3743 IEM_MC_BEGIN(0, 2);
3744 IEM_MC_LOCAL(uint32_t, u32Value);
3745 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3746 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3747 IEM_MC_FETCH_MEM_U16_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
3748 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3749 IEM_MC_ADVANCE_RIP();
3750 IEM_MC_END();
3751 }
3752 else
3753 {
3754 IEM_MC_BEGIN(0, 2);
3755 IEM_MC_LOCAL(uint64_t, u64Value);
3756 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3757 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3758 IEM_MC_FETCH_MEM_U16_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
3759 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3760 IEM_MC_ADVANCE_RIP();
3761 IEM_MC_END();
3762 }
3763 }
3764 return VINF_SUCCESS;
3765}
3766
3767
3768/** Opcode 0x0f 0xb8. */
3769FNIEMOP_STUB(iemOp_popcnt_Gv_Ev_jmpe);
3770/** Opcode 0x0f 0xb9. */
3771FNIEMOP_STUB(iemOp_Grp10);
3772
3773
3774/** Opcode 0x0f 0xba. */
3775FNIEMOP_DEF(iemOp_Grp8)
3776{
3777 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3778 PCIEMOPBINSIZES pImpl;
3779 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
3780 {
3781 case 0: case 1: case 2: case 3:
3782 return IEMOP_RAISE_INVALID_OPCODE();
3783 case 4: pImpl = &g_iemAImpl_bt; IEMOP_MNEMONIC("bt Ev,Ib"); break;
3784 case 5: pImpl = &g_iemAImpl_bts; IEMOP_MNEMONIC("bts Ev,Ib"); break;
3785 case 6: pImpl = &g_iemAImpl_btr; IEMOP_MNEMONIC("btr Ev,Ib"); break;
3786 case 7: pImpl = &g_iemAImpl_btc; IEMOP_MNEMONIC("btc Ev,Ib"); break;
3787 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3788 }
3789 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
3790
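    /*
     * The immediate bit offset is masked to the operand width below
     * (u8Bit & 0x0f/0x1f/0x3f), matching the architectural rule that the
     * immediate forms of bt/bts/btr/btc take the offset modulo the operand
     * size; e.g. bt ax, 18 tests bit 18 % 16 = 2.
     */
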
3791 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3792 {
3793 /* register destination. */
3794 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3795 IEMOP_HLP_NO_LOCK_PREFIX();
3796
3797 switch (pIemCpu->enmEffOpSize)
3798 {
3799 case IEMMODE_16BIT:
3800 IEM_MC_BEGIN(3, 0);
3801 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3802 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u8Bit & 0x0f, 1);
3803 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3804
3805 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3806 IEM_MC_REF_EFLAGS(pEFlags);
3807 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
3808
3809 IEM_MC_ADVANCE_RIP();
3810 IEM_MC_END();
3811 return VINF_SUCCESS;
3812
3813 case IEMMODE_32BIT:
3814 IEM_MC_BEGIN(3, 0);
3815 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3816 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u8Bit & 0x1f, 1);
3817 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3818
3819 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3820 IEM_MC_REF_EFLAGS(pEFlags);
3821 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
3822
3823 IEM_MC_ADVANCE_RIP();
3824 IEM_MC_END();
3825 return VINF_SUCCESS;
3826
3827 case IEMMODE_64BIT:
3828 IEM_MC_BEGIN(3, 0);
3829 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3830 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u8Bit & 0x3f, 1);
3831 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3832
3833 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3834 IEM_MC_REF_EFLAGS(pEFlags);
3835 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
3836
3837 IEM_MC_ADVANCE_RIP();
3838 IEM_MC_END();
3839 return VINF_SUCCESS;
3840
3841 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3842 }
3843 }
3844 else
3845 {
3846 /* memory destination. */
3847
3848 uint32_t fAccess;
3849 if (pImpl->pfnLockedU16)
3850 fAccess = IEM_ACCESS_DATA_RW;
3851 else /* BT */
3852 {
3853 IEMOP_HLP_NO_LOCK_PREFIX();
3854 fAccess = IEM_ACCESS_DATA_R;
3855 }
3856
3857 /** @todo test negative bit offsets! */
3858 switch (pIemCpu->enmEffOpSize)
3859 {
3860 case IEMMODE_16BIT:
3861 IEM_MC_BEGIN(3, 1);
3862 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3863 IEM_MC_ARG(uint16_t, u16Src, 1);
3864 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3865 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3866
3867 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3868 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3869 IEM_MC_ASSIGN(u16Src, u8Bit & 0x0f);
3870 IEM_MC_FETCH_EFLAGS(EFlags);
3871                IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0); /* R/W, or R/O for plain bt */
3872 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3873 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
3874 else
3875 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
3876                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
3877
3878 IEM_MC_COMMIT_EFLAGS(EFlags);
3879 IEM_MC_ADVANCE_RIP();
3880 IEM_MC_END();
3881 return VINF_SUCCESS;
3882
3883 case IEMMODE_32BIT:
3884 IEM_MC_BEGIN(3, 1);
3885 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3886 IEM_MC_ARG(uint32_t, u32Src, 1);
3887 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3888 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3889
3890 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3891 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3892 IEM_MC_ASSIGN(u32Src, u8Bit & 0x1f);
3893 IEM_MC_FETCH_EFLAGS(EFlags);
3894                IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0); /* R/W, or R/O for plain bt */
3895 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3896 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
3897 else
3898 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
3899                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
3900
3901 IEM_MC_COMMIT_EFLAGS(EFlags);
3902 IEM_MC_ADVANCE_RIP();
3903 IEM_MC_END();
3904 return VINF_SUCCESS;
3905
3906 case IEMMODE_64BIT:
3907 IEM_MC_BEGIN(3, 1);
3908 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3909 IEM_MC_ARG(uint64_t, u64Src, 1);
3910 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3911 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3912
3913 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3914 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3915 IEM_MC_ASSIGN(u64Src, u8Bit & 0x3f);
3916 IEM_MC_FETCH_EFLAGS(EFlags);
3917                IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0); /* R/W, or R/O for plain bt */
3918 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3919 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
3920 else
3921 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
3922                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
3923
3924 IEM_MC_COMMIT_EFLAGS(EFlags);
3925 IEM_MC_ADVANCE_RIP();
3926 IEM_MC_END();
3927 return VINF_SUCCESS;
3928
3929 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3930 }
3931 }
3932
3933}
3934
3935
3936/** Opcode 0x0f 0xbb. */
3937FNIEMOP_DEF(iemOp_btc_Ev_Gv)
3938{
3939 IEMOP_MNEMONIC("btc Ev,Gv");
3940 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btc);
3941}
3942
3943
3944/** Opcode 0x0f 0xbc. */
3945FNIEMOP_DEF(iemOp_bsf_Gv_Ev)
3946{
3947 IEMOP_MNEMONIC("bsf Gv,Ev");
3948 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
3949 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsf);
3950}
3951
3952
3953/** Opcode 0x0f 0xbd. */
3954FNIEMOP_DEF(iemOp_bsr_Gv_Ev)
3955{
3956 IEMOP_MNEMONIC("bsr Gv,Ev");
3957 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
3958 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsr);
3959}
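

/*
 * Note on bsf/bsr: when the source is zero, ZF is set and the destination
 * is architecturally undefined (left unchanged on many CPUs); only ZF is
 * reliably defined, hence the undefined-eflags declarations above.
 */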
3960
3961
3962/** Opcode 0x0f 0xbe. */
3963FNIEMOP_DEF(iemOp_movsx_Gv_Eb)
3964{
3965 IEMOP_MNEMONIC("movsx Gv,Eb");
3966
3967 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3968 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
3969
3970 /*
3971 * If rm is denoting a register, no more instruction bytes.
3972 */
3973 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3974 {
3975 switch (pIemCpu->enmEffOpSize)
3976 {
3977 case IEMMODE_16BIT:
3978 IEM_MC_BEGIN(0, 1);
3979 IEM_MC_LOCAL(uint16_t, u16Value);
3980 IEM_MC_FETCH_GREG_U8_SX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3981 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
3982 IEM_MC_ADVANCE_RIP();
3983 IEM_MC_END();
3984 return VINF_SUCCESS;
3985
3986 case IEMMODE_32BIT:
3987 IEM_MC_BEGIN(0, 1);
3988 IEM_MC_LOCAL(uint32_t, u32Value);
3989 IEM_MC_FETCH_GREG_U8_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3990 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3991 IEM_MC_ADVANCE_RIP();
3992 IEM_MC_END();
3993 return VINF_SUCCESS;
3994
3995 case IEMMODE_64BIT:
3996 IEM_MC_BEGIN(0, 1);
3997 IEM_MC_LOCAL(uint64_t, u64Value);
3998 IEM_MC_FETCH_GREG_U8_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3999 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
4000 IEM_MC_ADVANCE_RIP();
4001 IEM_MC_END();
4002 return VINF_SUCCESS;
4003
4004 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4005 }
4006 }
4007 else
4008 {
4009 /*
4010 * We're loading a register from memory.
4011 */
4012 switch (pIemCpu->enmEffOpSize)
4013 {
4014 case IEMMODE_16BIT:
4015 IEM_MC_BEGIN(0, 2);
4016 IEM_MC_LOCAL(uint16_t, u16Value);
4017 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4018 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
4019 IEM_MC_FETCH_MEM_U8_SX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
4020 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
4021 IEM_MC_ADVANCE_RIP();
4022 IEM_MC_END();
4023 return VINF_SUCCESS;
4024
4025 case IEMMODE_32BIT:
4026 IEM_MC_BEGIN(0, 2);
4027 IEM_MC_LOCAL(uint32_t, u32Value);
4028 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4029 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
4030 IEM_MC_FETCH_MEM_U8_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
4031 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
4032 IEM_MC_ADVANCE_RIP();
4033 IEM_MC_END();
4034 return VINF_SUCCESS;
4035
4036 case IEMMODE_64BIT:
4037 IEM_MC_BEGIN(0, 2);
4038 IEM_MC_LOCAL(uint64_t, u64Value);
4039 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4040 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
4041 IEM_MC_FETCH_MEM_U8_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
4042 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
4043 IEM_MC_ADVANCE_RIP();
4044 IEM_MC_END();
4045 return VINF_SUCCESS;
4046
4047 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4048 }
4049 }
4050}
4051
4052
4053/** Opcode 0x0f 0xbf. */
4054FNIEMOP_DEF(iemOp_movsx_Gv_Ew)
4055{
4056 IEMOP_MNEMONIC("movsx Gv,Ew");
4057
4058 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
4059 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
4060
4061 /** @todo Not entirely sure how the operand size prefix is handled here,
4062 * assuming that it will be ignored. Would be nice to have a few
4063     * tests for this. */
4064 /*
4065 * If rm is denoting a register, no more instruction bytes.
4066 */
4067 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
4068 {
4069 if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
4070 {
4071 IEM_MC_BEGIN(0, 1);
4072 IEM_MC_LOCAL(uint32_t, u32Value);
4073 IEM_MC_FETCH_GREG_U16_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4074 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
4075 IEM_MC_ADVANCE_RIP();
4076 IEM_MC_END();
4077 }
4078 else
4079 {
4080 IEM_MC_BEGIN(0, 1);
4081 IEM_MC_LOCAL(uint64_t, u64Value);
4082 IEM_MC_FETCH_GREG_U16_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4083 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
4084 IEM_MC_ADVANCE_RIP();
4085 IEM_MC_END();
4086 }
4087 }
4088 else
4089 {
4090 /*
4091 * We're loading a register from memory.
4092 */
4093 if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
4094 {
4095 IEM_MC_BEGIN(0, 2);
4096 IEM_MC_LOCAL(uint32_t, u32Value);
4097 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4098 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
4099 IEM_MC_FETCH_MEM_U16_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
4100 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
4101 IEM_MC_ADVANCE_RIP();
4102 IEM_MC_END();
4103 }
4104 else
4105 {
4106 IEM_MC_BEGIN(0, 2);
4107 IEM_MC_LOCAL(uint64_t, u64Value);
4108 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4109 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
4110 IEM_MC_FETCH_MEM_U16_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
4111 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
4112 IEM_MC_ADVANCE_RIP();
4113 IEM_MC_END();
4114 }
4115 }
4116 return VINF_SUCCESS;
4117}
4118
4119
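/*
 * xadd exchanges and adds: temp = dst; dst = dst + src; src = temp, where
 * the destination is the r/m operand and the reg field names the source
 * register.  E.g. with BL=1 and CL=2, xadd bl, cl leaves BL=3 and CL=1.
 */

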
4120/** Opcode 0x0f 0xc0. */
4121FNIEMOP_DEF(iemOp_xadd_Eb_Gb)
4122{
4123    IEMOP_MNEMONIC("xadd Eb,Gb");
4124    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
4125
4126 /*
4127 * If rm is denoting a register, no more instruction bytes.
4128 */
4129 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
4130 {
4131 IEMOP_HLP_NO_LOCK_PREFIX();
4132
4133 IEM_MC_BEGIN(3, 0);
4134 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
4135 IEM_MC_ARG(uint8_t *, pu8Reg, 1);
4136 IEM_MC_ARG(uint32_t *, pEFlags, 2);
4137
4138        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);                              /* Eb = r/m */
4139        IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); /* Gb = reg */
4140 IEM_MC_REF_EFLAGS(pEFlags);
4141 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8, pu8Dst, pu8Reg, pEFlags);
4142
4143 IEM_MC_ADVANCE_RIP();
4144 IEM_MC_END();
4145 }
4146 else
4147 {
4148 /*
4149 * We're accessing memory.
4150 */
4151 IEM_MC_BEGIN(3, 3);
4152 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
4153 IEM_MC_ARG(uint8_t *, pu8Reg, 1);
4154 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
4155 IEM_MC_LOCAL(uint8_t, u8RegCopy);
4156 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4157
4158 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
4159 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
4160        IEM_MC_FETCH_GREG_U8(u8RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); /* Gb = reg */
4161 IEM_MC_REF_LOCAL(pu8Reg, u8RegCopy);
4162 IEM_MC_FETCH_EFLAGS(EFlags);
4163 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4164 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8, pu8Dst, pu8Reg, pEFlags);
4165 else
4166 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8_locked, pu8Dst, pu8Reg, pEFlags);
4167
4168 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
4169 IEM_MC_COMMIT_EFLAGS(EFlags);
4170        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8RegCopy);
4171 IEM_MC_ADVANCE_RIP();
4172 IEM_MC_END();
4173 return VINF_SUCCESS;
4174 }
4175 return VINF_SUCCESS;
4176}
4177
4178
4179/** Opcode 0x0f 0xc1. */
4180FNIEMOP_DEF(iemOp_xadd_Ev_Gv)
4181{
4182 IEMOP_MNEMONIC("xadd Ev,Gv");
4183 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
4184
4185 /*
4186 * If rm is denoting a register, no more instruction bytes.
4187 */
4188 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
4189 {
4190 IEMOP_HLP_NO_LOCK_PREFIX();
4191
4192 switch (pIemCpu->enmEffOpSize)
4193 {
4194 case IEMMODE_16BIT:
4195 IEM_MC_BEGIN(3, 0);
4196 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4197 IEM_MC_ARG(uint16_t *, pu16Reg, 1);
4198 IEM_MC_ARG(uint32_t *, pEFlags, 2);
4199
4200                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);                              /* Ev = r/m */
4201                IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); /* Gv = reg */
4202 IEM_MC_REF_EFLAGS(pEFlags);
4203 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16, pu16Dst, pu16Reg, pEFlags);
4204
4205 IEM_MC_ADVANCE_RIP();
4206 IEM_MC_END();
4207 return VINF_SUCCESS;
4208
4209 case IEMMODE_32BIT:
4210 IEM_MC_BEGIN(3, 0);
4211 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4212 IEM_MC_ARG(uint32_t *, pu32Reg, 1);
4213 IEM_MC_ARG(uint32_t *, pEFlags, 2);
4214
4215                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);                              /* Ev = r/m */
4216                IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); /* Gv = reg */
4217 IEM_MC_REF_EFLAGS(pEFlags);
4218 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags);
4219
4220 IEM_MC_ADVANCE_RIP();
4221 IEM_MC_END();
4222 return VINF_SUCCESS;
4223
4224 case IEMMODE_64BIT:
4225 IEM_MC_BEGIN(3, 0);
4226 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4227 IEM_MC_ARG(uint64_t *, pu64Reg, 1);
4228 IEM_MC_ARG(uint32_t *, pEFlags, 2);
4229
4230                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);                              /* Ev = r/m */
4231                IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); /* Gv = reg */
4232 IEM_MC_REF_EFLAGS(pEFlags);
4233 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64, pu64Dst, pu64Reg, pEFlags);
4234
4235 IEM_MC_ADVANCE_RIP();
4236 IEM_MC_END();
4237 return VINF_SUCCESS;
4238
4239 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4240 }
4241 }
4242 else
4243 {
4244 /*
4245 * We're accessing memory.
4246 */
4247 switch (pIemCpu->enmEffOpSize)
4248 {
4249 case IEMMODE_16BIT:
4250 IEM_MC_BEGIN(3, 3);
4251 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4252 IEM_MC_ARG(uint16_t *, pu16Reg, 1);
4253 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
4254 IEM_MC_LOCAL(uint16_t, u16RegCopy);
4255 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4256
4257 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
4258 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
4259                IEM_MC_FETCH_GREG_U16(u16RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); /* Gv = reg */
4260 IEM_MC_REF_LOCAL(pu16Reg, u16RegCopy);
4261 IEM_MC_FETCH_EFLAGS(EFlags);
4262 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4263 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16, pu16Dst, pu16Reg, pEFlags);
4264 else
4265 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16_locked, pu16Dst, pu16Reg, pEFlags);
4266
4267 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
4268 IEM_MC_COMMIT_EFLAGS(EFlags);
4269                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16RegCopy);
4270 IEM_MC_ADVANCE_RIP();
4271 IEM_MC_END();
4272 return VINF_SUCCESS;
4273
4274 case IEMMODE_32BIT:
4275 IEM_MC_BEGIN(3, 3);
4276 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4277 IEM_MC_ARG(uint32_t *, pu32Reg, 1);
4278 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
4279 IEM_MC_LOCAL(uint32_t, u32RegCopy);
4280 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4281
4282 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
4283 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
4284                IEM_MC_FETCH_GREG_U32(u32RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); /* Gv = reg */
4285 IEM_MC_REF_LOCAL(pu32Reg, u32RegCopy);
4286 IEM_MC_FETCH_EFLAGS(EFlags);
4287 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4288 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags);
4289 else
4290 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32_locked, pu32Dst, pu32Reg, pEFlags);
4291
4292 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
4293 IEM_MC_COMMIT_EFLAGS(EFlags);
4294                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32RegCopy);
4295 IEM_MC_ADVANCE_RIP();
4296 IEM_MC_END();
4297 return VINF_SUCCESS;
4298
4299 case IEMMODE_64BIT:
4300 IEM_MC_BEGIN(3, 3);
4301 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4302 IEM_MC_ARG(uint64_t *, pu64Reg, 1);
4303 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
4304 IEM_MC_LOCAL(uint64_t, u64RegCopy);
4305 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4306
4307 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
4308 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
4309                IEM_MC_FETCH_GREG_U64(u64RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); /* Gv = reg */
4310 IEM_MC_REF_LOCAL(pu64Reg, u64RegCopy);
4311 IEM_MC_FETCH_EFLAGS(EFlags);
4312 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4313 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64, pu64Dst, pu64Reg, pEFlags);
4314 else
4315 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64_locked, pu64Dst, pu64Reg, pEFlags);
4316
4317 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
4318 IEM_MC_COMMIT_EFLAGS(EFlags);
4319                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64RegCopy);
4320 IEM_MC_ADVANCE_RIP();
4321 IEM_MC_END();
4322 return VINF_SUCCESS;
4323
4324 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4325 }
4326 }
4327}
4328
4329/** Opcode 0x0f 0xc2. */
4330FNIEMOP_STUB(iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib);
4331/** Opcode 0x0f 0xc3. */
4332FNIEMOP_STUB(iemOp_movnti_My_Gy);
4333/** Opcode 0x0f 0xc4. */
4334FNIEMOP_STUB(iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib);
4335/** Opcode 0x0f 0xc5. */
4336FNIEMOP_STUB(iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib);
4337/** Opcode 0x0f 0xc6. */
4338FNIEMOP_STUB(iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib);
4339/** Opcode 0x0f 0xc7. */
4340FNIEMOP_STUB(iemOp_Grp9);
4341
4342
4343/**
4344 * Common 'bswap register' helper.
4345 */
4346FNIEMOP_DEF_1(iemOpCommonBswapGReg, uint8_t, iReg)
4347{
4348 IEMOP_HLP_NO_LOCK_PREFIX();
4349 switch (pIemCpu->enmEffOpSize)
4350 {
4351 case IEMMODE_16BIT:
4352 IEM_MC_BEGIN(1, 0);
4353 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4354 IEM_MC_REF_GREG_U32(pu32Dst, iReg); /* Don't clear the high dword! */
4355 IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_bswap_u16, pu32Dst);
4356 IEM_MC_ADVANCE_RIP();
4357 IEM_MC_END();
4358 return VINF_SUCCESS;
4359
4360 case IEMMODE_32BIT:
4361 IEM_MC_BEGIN(1, 0);
4362 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4363 IEM_MC_REF_GREG_U32(pu32Dst, iReg);
4364 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
4365 IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_bswap_u32, pu32Dst);
4366 IEM_MC_ADVANCE_RIP();
4367 IEM_MC_END();
4368 return VINF_SUCCESS;
4369
4370 case IEMMODE_64BIT:
4371 IEM_MC_BEGIN(1, 0);
4372 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4373 IEM_MC_REF_GREG_U64(pu64Dst, iReg);
4374 IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_bswap_u64, pu64Dst);
4375 IEM_MC_ADVANCE_RIP();
4376 IEM_MC_END();
4377 return VINF_SUCCESS;
4378
4379 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4380 }
4381}
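

/*
 * For reference: bswap reverses the byte order, e.g. EAX=0x12345678 becomes
 * 0x78563412.  With a 16-bit operand the result is undefined by the SDM
 * (commonly the low word ends up zero); the 16-bit case above defers that
 * choice to the assembly worker.
 */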
4382
4383
4384/** Opcode 0x0f 0xc8. */
4385FNIEMOP_DEF(iemOp_bswap_rAX_r8)
4386{
4387 IEMOP_MNEMONIC("bswap rAX/r8");
4388 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xAX | pIemCpu->uRexReg);
4389}
4390
4391
4392/** Opcode 0x0f 0xc9. */
4393FNIEMOP_DEF(iemOp_bswap_rCX_r9)
4394{
4395 IEMOP_MNEMONIC("bswap rCX/r9");
4396 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xCX | pIemCpu->uRexReg);
4397}
4398
4399
4400/** Opcode 0x0f 0xca. */
4401FNIEMOP_DEF(iemOp_bswap_rDX_r10)
4402{
4403    IEMOP_MNEMONIC("bswap rDX/r10");
4404 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDX | pIemCpu->uRexReg);
4405}
4406
4407
4408/** Opcode 0x0f 0xcb. */
4409FNIEMOP_DEF(iemOp_bswap_rBX_r11)
4410{
4411    IEMOP_MNEMONIC("bswap rBX/r11");
4412 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBX | pIemCpu->uRexReg);
4413}
4414
4415
4416/** Opcode 0x0f 0xcc. */
4417FNIEMOP_DEF(iemOp_bswap_rSP_r12)
4418{
4419 IEMOP_MNEMONIC("bswap rSP/r12");
4420 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSP | pIemCpu->uRexReg);
4421}
4422
4423
4424/** Opcode 0x0f 0xcd. */
4425FNIEMOP_DEF(iemOp_bswap_rBP_r13)
4426{
4427 IEMOP_MNEMONIC("bswap rBP/r13");
4428 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBP | pIemCpu->uRexReg);
4429}
4430
4431
4432/** Opcode 0x0f 0xce. */
4433FNIEMOP_DEF(iemOp_bswap_rSI_r14)
4434{
4435 IEMOP_MNEMONIC("bswap rSI/r14");
4436 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSI | pIemCpu->uRexReg);
4437}
4438
4439
4440/** Opcode 0x0f 0xcf. */
4441FNIEMOP_DEF(iemOp_bswap_rDI_r15)
4442{
4443 IEMOP_MNEMONIC("bswap rDI/r15");
4444 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDI | pIemCpu->uRexReg);
4445}
4446
4447
4448
4449/** Opcode 0x0f 0xd0. */
4450FNIEMOP_STUB(iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps);
4451/** Opcode 0x0f 0xd1. */
4452FNIEMOP_STUB(iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq);
4453/** Opcode 0x0f 0xd2. */
4454FNIEMOP_STUB(iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq);
4455/** Opcode 0x0f 0xd3. */
4456FNIEMOP_STUB(iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq);
4457/** Opcode 0x0f 0xd4. */
4458FNIEMOP_STUB(iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq);
4459/** Opcode 0x0f 0xd5. */
4460FNIEMOP_STUB(iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq);
4461/** Opcode 0x0f 0xd6. */
4462FNIEMOP_STUB(iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq);
4463/** Opcode 0x0f 0xd7. */
4464FNIEMOP_STUB(iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq);
4465/** Opcode 0x0f 0xd8. */
4466FNIEMOP_STUB(iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq);
4467/** Opcode 0x0f 0xd9. */
4468FNIEMOP_STUB(iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq);
4469/** Opcode 0x0f 0xda. */
4470FNIEMOP_STUB(iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq);
4471/** Opcode 0x0f 0xdb. */
4472FNIEMOP_STUB(iemOp_pand_Pq_Qq__pand_Vdq_Wdq);
4473/** Opcode 0x0f 0xdc. */
4474FNIEMOP_STUB(iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq);
4475/** Opcode 0x0f 0xdd. */
4476FNIEMOP_STUB(iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq);
4477/** Opcode 0x0f 0xde. */
4478FNIEMOP_STUB(iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq);
4479/** Opcode 0x0f 0xdf. */
4480FNIEMOP_STUB(iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq);
4481/** Opcode 0x0f 0xe0. */
4482FNIEMOP_STUB(iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq);
4483/** Opcode 0x0f 0xe1. */
4484FNIEMOP_STUB(iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq);
4485/** Opcode 0x0f 0xe2. */
4486FNIEMOP_STUB(iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq);
4487/** Opcode 0x0f 0xe3. */
4488FNIEMOP_STUB(iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq);
4489/** Opcode 0x0f 0xe4. */
4490FNIEMOP_STUB(iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq);
4491/** Opcode 0x0f 0xe5. */
4492FNIEMOP_STUB(iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq);
4493/** Opcode 0x0f 0xe6. */
4494FNIEMOP_STUB(iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd);
4495/** Opcode 0x0f 0xe7. */
4496FNIEMOP_STUB(iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq);
4497/** Opcode 0x0f 0xe8. */
4498FNIEMOP_STUB(iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq);
4499/** Opcode 0x0f 0xe9. */
4500FNIEMOP_STUB(iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq);
4501/** Opcode 0x0f 0xea. */
4502FNIEMOP_STUB(iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq);
4503/** Opcode 0x0f 0xeb. */
4504FNIEMOP_STUB(iemOp_por_Pq_Qq__por_Vdq_Wdq);
4505/** Opcode 0x0f 0xec. */
4506FNIEMOP_STUB(iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq);
4507/** Opcode 0x0f 0xed. */
4508FNIEMOP_STUB(iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq);
4509/** Opcode 0x0f 0xee. */
4510FNIEMOP_STUB(iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq);
4511/** Opcode 0x0f 0xef. */
4512FNIEMOP_STUB(iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq);
4513/** Opcode 0x0f 0xf0. */
4514FNIEMOP_STUB(iemOp_lddqu_Vdq_Mdq);
4515/** Opcode 0x0f 0xf1. */
4516FNIEMOP_STUB(iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq);
4517/** Opcode 0x0f 0xf2. */
4518FNIEMOP_STUB(iemOp_psld_Pq_Qq__pslld_Vdq_Wdq);
4519/** Opcode 0x0f 0xf3. */
4520FNIEMOP_STUB(iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq);
4521/** Opcode 0x0f 0xf4. */
4522FNIEMOP_STUB(iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq);
4523/** Opcode 0x0f 0xf5. */
4524FNIEMOP_STUB(iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq);
4525/** Opcode 0x0f 0xf6. */
4526FNIEMOP_STUB(iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq);
4527/** Opcode 0x0f 0xf7. */
4528FNIEMOP_STUB(iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq);
4529/** Opcode 0x0f 0xf8. */
4530FNIEMOP_STUB(iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq);
4531/** Opcode 0x0f 0xf9. */
4532FNIEMOP_STUB(iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq);
4533/** Opcode 0x0f 0xfa. */
4534FNIEMOP_STUB(iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq);
4535/** Opcode 0x0f 0xfb. */
4536FNIEMOP_STUB(iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq);
4537/** Opcode 0x0f 0xfc. */
4538FNIEMOP_STUB(iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq);
4539/** Opcode 0x0f 0xfd. */
4540FNIEMOP_STUB(iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq);
4541/** Opcode 0x0f 0xfe. */
4542FNIEMOP_STUB(iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq);
4543
4544
4545const PFNIEMOP g_apfnTwoByteMap[256] =
4546{
4547 /* 0x00 */ iemOp_Grp6, iemOp_Grp7, iemOp_lar_Gv_Ew, iemOp_lsl_Gv_Ew,
4548 /* 0x04 */ iemOp_Invalid, iemOp_syscall, iemOp_clts, iemOp_sysret,
4549 /* 0x08 */ iemOp_invd, iemOp_wbinvd, iemOp_Invalid, iemOp_ud2,
4550 /* 0x0c */ iemOp_Invalid, iemOp_nop_Ev_GrpP, iemOp_femms, iemOp_3Dnow,
4551 /* 0x10 */ iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd,
4552 /* 0x11 */ iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd,
4553 /* 0x12 */ iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq,
4554 /* 0x13 */ iemOp_movlps_Mq_Vq__movlpd_Mq_Vq,
4555 /* 0x14 */ iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq,
4556 /* 0x15 */ iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq,
4557 /* 0x16 */ iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq,
4558 /* 0x17 */ iemOp_movhps_Mq_Vq__movhpd_Mq_Vq,
4559 /* 0x18 */ iemOp_prefetch_Grp16, iemOp_nop_Ev, iemOp_nop_Ev, iemOp_nop_Ev,
4560 /* 0x1c */ iemOp_nop_Ev, iemOp_nop_Ev, iemOp_nop_Ev, iemOp_nop_Ev,
4561 /* 0x20 */ iemOp_mov_Rd_Cd, iemOp_mov_Rd_Dd, iemOp_mov_Cd_Rd, iemOp_mov_Dd_Rd,
4562 /* 0x24 */ iemOp_mov_Rd_Td, iemOp_Invalid, iemOp_mov_Td_Rd, iemOp_Invalid,
4563 /* 0x28 */ iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd,
4564 /* 0x29 */ iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd,
4565 /* 0x2a */ iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey,
4566 /* 0x2b */ iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd,
4567 /* 0x2c */ iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd,
4568 /* 0x2d */ iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd,
4569 /* 0x2e */ iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd,
4570 /* 0x2f */ iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd,
4571 /* 0x30 */ iemOp_wrmsr, iemOp_rdtsc, iemOp_rdmsr, iemOp_rdpmc,
4572 /* 0x34 */ iemOp_sysenter, iemOp_sysexit, iemOp_Invalid, iemOp_getsec,
4573 /* 0x38 */ iemOp_3byte_Esc_A4, iemOp_Invalid, iemOp_3byte_Esc_A5, iemOp_Invalid,
4574 /* 0x3c */ iemOp_movnti_Gv_Ev/*?*/,iemOp_Invalid, iemOp_Invalid, iemOp_Invalid,
4575 /* 0x40 */ iemOp_cmovo_Gv_Ev, iemOp_cmovno_Gv_Ev, iemOp_cmovc_Gv_Ev, iemOp_cmovnc_Gv_Ev,
4576 /* 0x44 */ iemOp_cmove_Gv_Ev, iemOp_cmovne_Gv_Ev, iemOp_cmovbe_Gv_Ev, iemOp_cmovnbe_Gv_Ev,
4577 /* 0x48 */ iemOp_cmovs_Gv_Ev, iemOp_cmovns_Gv_Ev, iemOp_cmovp_Gv_Ev, iemOp_cmovnp_Gv_Ev,
4578 /* 0x4c */ iemOp_cmovl_Gv_Ev, iemOp_cmovnl_Gv_Ev, iemOp_cmovle_Gv_Ev, iemOp_cmovnle_Gv_Ev,
4579 /* 0x50 */ iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd,
4580 /* 0x51 */ iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd,
4581 /* 0x52 */ iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss,
4582 /* 0x53 */ iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss,
4583 /* 0x54 */ iemOp_andps_Vps_Wps__andpd_Wpd_Vpd,
4584 /* 0x55 */ iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd,
4585 /* 0x56 */ iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd,
4586 /* 0x57 */ iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd,
4587 /* 0x58 */ iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd,
4588 /* 0x59 */ iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd,
4589 /* 0x5a */ iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd,
4590 /* 0x5b */ iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps,
4591 /* 0x5c */ iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd,
4592 /* 0x5d */ iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd,
4593 /* 0x5e */ iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd,
4594 /* 0x5f */ iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd,
4595 /* 0x60 */ iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq,
4596 /* 0x61 */ iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq,
4597 /* 0x62 */ iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq,
4598 /* 0x63 */ iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq,
4599 /* 0x64 */ iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq,
4600 /* 0x65 */ iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq,
4601 /* 0x66 */ iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq,
4602 /* 0x67 */ iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq,
4603 /* 0x68 */ iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq,
4604 /* 0x69 */ iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq,
4605 /* 0x6a */ iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq,
4606 /* 0x6b */ iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq,
4607 /* 0x6c */ iemOp_punpcklqdq_Vdq_Wdq,
4608 /* 0x6d */ iemOp_punpckhqdq_Vdq_Wdq,
4609 /* 0x6e */ iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey,
4610 /* 0x6f */ iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq,
4611 /* 0x70 */ iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib,
4612 /* 0x71 */ iemOp_Grp12,
4613 /* 0x72 */ iemOp_Grp13,
4614 /* 0x73 */ iemOp_Grp14,
4615 /* 0x74 */ iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq,
4616 /* 0x75 */ iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq,
4617 /* 0x76 */ iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq,
4618 /* 0x77 */ iemOp_emms,
4619 /* 0x78 */ iemOp_vmread, iemOp_vmwrite, iemOp_Invalid, iemOp_Invalid,
4620 /* 0x7c */ iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps,
4621 /* 0x7d */ iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps,
4622 /* 0x7e */ iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq,
4623 /* 0x7f */ iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq,
4624 /* 0x80 */ iemOp_jo_Jv, iemOp_jno_Jv, iemOp_jc_Jv, iemOp_jnc_Jv,
4625 /* 0x84 */ iemOp_je_Jv, iemOp_jne_Jv, iemOp_jbe_Jv, iemOp_jnbe_Jv,
4626 /* 0x88 */ iemOp_js_Jv, iemOp_jns_Jv, iemOp_jp_Jv, iemOp_jnp_Jv,
4627 /* 0x8c */ iemOp_jl_Jv, iemOp_jnl_Jv, iemOp_jle_Jv, iemOp_jnle_Jv,
4628 /* 0x90 */ iemOp_seto_Eb, iemOp_setno_Eb, iemOp_setc_Eb, iemOp_setnc_Eb,
4629 /* 0x94 */ iemOp_sete_Eb, iemOp_setne_Eb, iemOp_setbe_Eb, iemOp_setnbe_Eb,
4630 /* 0x98 */ iemOp_sets_Eb, iemOp_setns_Eb, iemOp_setp_Eb, iemOp_setnp_Eb,
4631 /* 0x9c */ iemOp_setl_Eb, iemOp_setnl_Eb, iemOp_setle_Eb, iemOp_setnle_Eb,
4632 /* 0xa0 */ iemOp_push_fs, iemOp_pop_fs, iemOp_cpuid, iemOp_bt_Ev_Gv,
4633 /* 0xa4 */ iemOp_shld_Ev_Gv_Ib, iemOp_shld_Ev_Gv_CL, iemOp_Invalid, iemOp_Invalid,
4634 /* 0xa8 */ iemOp_push_gs, iemOp_pop_gs, iemOp_rsm, iemOp_bts_Ev_Gv,
4635 /* 0xac */ iemOp_shrd_Ev_Gv_Ib, iemOp_shrd_Ev_Gv_CL, iemOp_Grp15, iemOp_imul_Gv_Ev,
4636 /* 0xb0 */ iemOp_cmpxchg_Eb_Gb, iemOp_cmpxchg_Ev_Gv, iemOp_lss_Gv_Mp, iemOp_btr_Ev_Gv,
4637 /* 0xb4 */ iemOp_lfs_Gv_Mp, iemOp_lgs_Gv_Mp, iemOp_movzx_Gv_Eb, iemOp_movzx_Gv_Ew,
4638 /* 0xb8 */ iemOp_popcnt_Gv_Ev_jmpe,iemOp_Grp10, iemOp_Grp8, iemOp_btc_Ev_Gv,
4639 /* 0xbc */ iemOp_bsf_Gv_Ev, iemOp_bsr_Gv_Ev, iemOp_movsx_Gv_Eb, iemOp_movsx_Gv_Ew,
4640 /* 0xc0 */ iemOp_xadd_Eb_Gb,
4641 /* 0xc1 */ iemOp_xadd_Ev_Gv,
4642 /* 0xc2 */ iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib,
4643 /* 0xc3 */ iemOp_movnti_My_Gy,
4644 /* 0xc4 */ iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib,
4645 /* 0xc5 */ iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib,
4646 /* 0xc6 */ iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib,
4647 /* 0xc7 */ iemOp_Grp9,
4648 /* 0xc8 */ iemOp_bswap_rAX_r8, iemOp_bswap_rCX_r9, iemOp_bswap_rDX_r10, iemOp_bswap_rBX_r11,
4649 /* 0xcc */ iemOp_bswap_rSP_r12, iemOp_bswap_rBP_r13, iemOp_bswap_rSI_r14, iemOp_bswap_rDI_r15,
4650 /* 0xd0 */ iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps,
4651 /* 0xd1 */ iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq,
4652 /* 0xd2 */ iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq,
4653 /* 0xd3 */ iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq,
4654 /* 0xd4 */ iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq,
4655 /* 0xd5 */ iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq,
4656 /* 0xd6 */ iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq,
4657 /* 0xd7 */ iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq,
4658 /* 0xd8 */ iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq,
4659 /* 0xd9 */ iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq,
4660 /* 0xda */ iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq,
4661 /* 0xdb */ iemOp_pand_Pq_Qq__pand_Vdq_Wdq,
4662 /* 0xdc */ iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq,
4663 /* 0xdd */ iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq,
4664 /* 0xde */ iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq,
4665 /* 0xdf */ iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq,
4666 /* 0xe0 */ iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq,
4667 /* 0xe1 */ iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq,
4668 /* 0xe2 */ iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq,
4669 /* 0xe3 */ iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq,
4670 /* 0xe4 */ iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq,
4671 /* 0xe5 */ iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq,
4672 /* 0xe6 */ iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd,
4673 /* 0xe7 */ iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq,
4674 /* 0xe8 */ iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq,
4675 /* 0xe9 */ iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq,
4676 /* 0xea */ iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq,
4677 /* 0xeb */ iemOp_por_Pq_Qq__por_Vdq_Wdq,
4678 /* 0xec */ iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq,
4679 /* 0xed */ iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq,
4680 /* 0xee */ iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq,
4681 /* 0xef */ iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq,
4682 /* 0xf0 */ iemOp_lddqu_Vdq_Mdq,
4683 /* 0xf1 */ iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq,
4684 /* 0xf2 */ iemOp_psld_Pq_Qq__pslld_Vdq_Wdq,
4685 /* 0xf3 */ iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq,
4686 /* 0xf4 */ iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq,
4687 /* 0xf5 */ iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq,
4688 /* 0xf6 */ iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq,
4689 /* 0xf7 */ iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq,
4690 /* 0xf8 */ iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq,
4691 /* 0xf9 */ iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq,
4692 /* 0xfa */ iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq,
4693 /* 0xfb */ iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq,
4694 /* 0xfc */ iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq,
4695 /* 0xfd */ iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq,
4696 /* 0xfe */ iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq,
4697 /* 0xff */ iemOp_Invalid
4698};
4699
4700/** @} */
4701
4702
4703/** @name One byte opcodes.
4704 *
4705 * @{
4706 */
4707
4708/** Opcode 0x00. */
4709FNIEMOP_DEF(iemOp_add_Eb_Gb)
4710{
4711 IEMOP_MNEMONIC("add Eb,Gb");
4712 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_add);
4713}
4714
4715
4716/** Opcode 0x01. */
4717FNIEMOP_DEF(iemOp_add_Ev_Gv)
4718{
4719 IEMOP_MNEMONIC("add Ev,Gv");
4720 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_add);
4721}
4722
4723
4724/** Opcode 0x02. */
4725FNIEMOP_DEF(iemOp_add_Gb_Eb)
4726{
4727 IEMOP_MNEMONIC("add Gb,Eb");
4728 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_add);
4729}
4730
4731
4732/** Opcode 0x03. */
4733FNIEMOP_DEF(iemOp_add_Gv_Ev)
4734{
4735 IEMOP_MNEMONIC("add Gv,Ev");
4736 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_add);
4737}
4738
4739
4740/** Opcode 0x04. */
4741FNIEMOP_DEF(iemOp_add_Al_Ib)
4742{
4743 IEMOP_MNEMONIC("add al,Ib");
4744 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_add);
4745}
4746
4747
4748/** Opcode 0x05. */
4749FNIEMOP_DEF(iemOp_add_eAX_Iz)
4750{
4751 IEMOP_MNEMONIC("add rAX,Iz");
4752 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_add);
4753}
4754
4755
4756/** Opcode 0x06. */
4757FNIEMOP_DEF(iemOp_push_ES)
4758{
4759 IEMOP_MNEMONIC("push es");
4760 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_ES);
4761}
4762
4763
4764/** Opcode 0x07. */
4765FNIEMOP_DEF(iemOp_pop_ES)
4766{
4767 IEMOP_MNEMONIC("pop es");
4768 IEMOP_HLP_NO_64BIT();
4769 IEMOP_HLP_NO_LOCK_PREFIX();
4770 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_ES, pIemCpu->enmEffOpSize);
4771}
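

/*
 * Note: push/pop of the legacy segment registers (0x06/0x07, 0x0e,
 * 0x16/0x17 and 0x1e/0x1f) are invalid in 64-bit mode, hence the
 * IEMOP_HLP_NO_64BIT() checks in the pop decoders.
 */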
4772
4773
4774/** Opcode 0x08. */
4775FNIEMOP_DEF(iemOp_or_Eb_Gb)
4776{
4777 IEMOP_MNEMONIC("or Eb,Gb");
4778 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4779 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_or);
4780}
4781
4782
4783/** Opcode 0x09. */
4784FNIEMOP_DEF(iemOp_or_Ev_Gv)
4785{
4786    IEMOP_MNEMONIC("or Ev,Gv");
4787 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4788 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_or);
4789}
4790
4791
4792/** Opcode 0x0a. */
4793FNIEMOP_DEF(iemOp_or_Gb_Eb)
4794{
4795 IEMOP_MNEMONIC("or Gb,Eb");
4796 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4797 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_or);
4798}
4799
4800
4801/** Opcode 0x0b. */
4802FNIEMOP_DEF(iemOp_or_Gv_Ev)
4803{
4804 IEMOP_MNEMONIC("or Gv,Ev");
4805 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4806 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_or);
4807}
4808
4809
4810/** Opcode 0x0c. */
4811FNIEMOP_DEF(iemOp_or_Al_Ib)
4812{
4813 IEMOP_MNEMONIC("or al,Ib");
4814 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4815 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_or);
4816}
4817
4818
4819/** Opcode 0x0d. */
4820FNIEMOP_DEF(iemOp_or_eAX_Iz)
4821{
4822 IEMOP_MNEMONIC("or rAX,Iz");
4823 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4824 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_or);
4825}
4826
4827
4828/** Opcode 0x0e. */
4829FNIEMOP_DEF(iemOp_push_CS)
4830{
4831 IEMOP_MNEMONIC("push cs");
4832 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_CS);
4833}
4834
4835
4836/** Opcode 0x0f. */
4837FNIEMOP_DEF(iemOp_2byteEscape)
4838{
4839 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4840 return FNIEMOP_CALL(g_apfnTwoByteMap[b]);
4841}
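

/*
 * Dispatch example: for the byte sequence 0f b6 /r the 0xb6 fetched above
 * lands in g_apfnTwoByteMap[0xb6] = iemOp_movzx_Gv_Eb.
 */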
4842
4843/** Opcode 0x10. */
4844FNIEMOP_DEF(iemOp_adc_Eb_Gb)
4845{
4846 IEMOP_MNEMONIC("adc Eb,Gb");
4847 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_adc);
4848}
4849
4850
4851/** Opcode 0x11. */
4852FNIEMOP_DEF(iemOp_adc_Ev_Gv)
4853{
4854 IEMOP_MNEMONIC("adc Ev,Gv");
4855 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_adc);
4856}
4857
4858
4859/** Opcode 0x12. */
4860FNIEMOP_DEF(iemOp_adc_Gb_Eb)
4861{
4862 IEMOP_MNEMONIC("adc Gb,Eb");
4863 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_adc);
4864}
4865
4866
4867/** Opcode 0x13. */
4868FNIEMOP_DEF(iemOp_adc_Gv_Ev)
4869{
4870 IEMOP_MNEMONIC("adc Gv,Ev");
4871 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_adc);
4872}
4873
4874
4875/** Opcode 0x14. */
4876FNIEMOP_DEF(iemOp_adc_Al_Ib)
4877{
4878 IEMOP_MNEMONIC("adc al,Ib");
4879 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_adc);
4880}
4881
4882
4883/** Opcode 0x15. */
4884FNIEMOP_DEF(iemOp_adc_eAX_Iz)
4885{
4886 IEMOP_MNEMONIC("adc rAX,Iz");
4887 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_adc);
4888}
4889
4890
4891/** Opcode 0x16. */
4892FNIEMOP_DEF(iemOp_push_SS)
4893{
4894 IEMOP_MNEMONIC("push ss");
4895 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_SS);
4896}
4897
4898
4899/** Opcode 0x17. */
4900FNIEMOP_DEF(iemOp_pop_SS)
4901{
4902 IEMOP_MNEMONIC("pop ss"); /** @todo implies instruction fusing? */
4903 IEMOP_HLP_NO_LOCK_PREFIX();
4904 IEMOP_HLP_NO_64BIT();
4905 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_SS, pIemCpu->enmEffOpSize);
4906}
4907
4908
4909/** Opcode 0x18. */
4910FNIEMOP_DEF(iemOp_sbb_Eb_Gb)
4911{
4912 IEMOP_MNEMONIC("sbb Eb,Gb");
4913 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sbb);
4914}
4915
4916
4917/** Opcode 0x19. */
4918FNIEMOP_DEF(iemOp_sbb_Ev_Gv)
4919{
4920 IEMOP_MNEMONIC("sbb Ev,Gv");
4921 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sbb);
4922}
4923
4924
4925/** Opcode 0x1a. */
4926FNIEMOP_DEF(iemOp_sbb_Gb_Eb)
4927{
4928 IEMOP_MNEMONIC("sbb Gb,Eb");
4929 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sbb);
4930}
4931
4932
4933/** Opcode 0x1b. */
4934FNIEMOP_DEF(iemOp_sbb_Gv_Ev)
4935{
4936 IEMOP_MNEMONIC("sbb Gv,Ev");
4937 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sbb);
4938}
4939
4940
4941/** Opcode 0x1c. */
4942FNIEMOP_DEF(iemOp_sbb_Al_Ib)
4943{
4944 IEMOP_MNEMONIC("sbb al,Ib");
4945 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sbb);
4946}
4947
4948
4949/** Opcode 0x1d. */
4950FNIEMOP_DEF(iemOp_sbb_eAX_Iz)
4951{
4952 IEMOP_MNEMONIC("sbb rAX,Iz");
4953 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sbb);
4954}
4955
4956
4957/** Opcode 0x1e. */
4958FNIEMOP_DEF(iemOp_push_DS)
4959{
4960 IEMOP_MNEMONIC("push ds");
4961 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_DS);
4962}
4963
4964
4965/** Opcode 0x1f. */
4966FNIEMOP_DEF(iemOp_pop_DS)
4967{
4968 IEMOP_MNEMONIC("pop ds");
4969 IEMOP_HLP_NO_LOCK_PREFIX();
4970 IEMOP_HLP_NO_64BIT();
4971 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_DS, pIemCpu->enmEffOpSize);
4972}
4973
4974
4975/** Opcode 0x20. */
4976FNIEMOP_DEF(iemOp_and_Eb_Gb)
4977{
4978 IEMOP_MNEMONIC("and Eb,Gb");
4979 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4980 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_and);
4981}
4982
4983
4984/** Opcode 0x21. */
4985FNIEMOP_DEF(iemOp_and_Ev_Gv)
4986{
4987 IEMOP_MNEMONIC("and Ev,Gv");
4988 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4989 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_and);
4990}
4991
4992
4993/** Opcode 0x22. */
4994FNIEMOP_DEF(iemOp_and_Gb_Eb)
4995{
4996 IEMOP_MNEMONIC("and Gb,Eb");
4997 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4998 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_and);
4999}
5000
5001
5002/** Opcode 0x23. */
5003FNIEMOP_DEF(iemOp_and_Gv_Ev)
5004{
5005 IEMOP_MNEMONIC("and Gv,Ev");
5006 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
5007 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_and);
5008}
5009
5010
5011/** Opcode 0x24. */
5012FNIEMOP_DEF(iemOp_and_Al_Ib)
5013{
5014 IEMOP_MNEMONIC("and al,Ib");
5015 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
5016 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_and);
5017}
5018
5019
5020/** Opcode 0x25. */
5021FNIEMOP_DEF(iemOp_and_eAX_Iz)
5022{
5023 IEMOP_MNEMONIC("and rAX,Iz");
5024 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
5025 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_and);
5026}
5027
5028
5029/** Opcode 0x26. */
5030FNIEMOP_DEF(iemOp_seg_ES)
5031{
5032 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_ES;
5033 pIemCpu->iEffSeg = X86_SREG_ES;
5034
5035 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5036 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5037}
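

/*
 * The segment override prefixes simply record the override and recurse into
 * the one-byte map for the rest of the instruction, so e.g. for 26 8b /r the
 * mov Gv,Ev decoder runs with iEffSeg = X86_SREG_ES.
 */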
5038
5039
5040/** Opcode 0x27. */
5041FNIEMOP_STUB(iemOp_daa);
5042
5043
5044/** Opcode 0x28. */
5045FNIEMOP_DEF(iemOp_sub_Eb_Gb)
5046{
5047 IEMOP_MNEMONIC("sub Eb,Gb");
5048 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sub);
5049}
5050
5051
5052/** Opcode 0x29. */
5053FNIEMOP_DEF(iemOp_sub_Ev_Gv)
5054{
5055 IEMOP_MNEMONIC("sub Ev,Gv");
5056 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sub);
5057}
5058
5059
5060/** Opcode 0x2a. */
5061FNIEMOP_DEF(iemOp_sub_Gb_Eb)
5062{
5063 IEMOP_MNEMONIC("sub Gb,Eb");
5064 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sub);
5065}
5066
5067
5068/** Opcode 0x2b. */
5069FNIEMOP_DEF(iemOp_sub_Gv_Ev)
5070{
5071 IEMOP_MNEMONIC("sub Gv,Ev");
5072 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sub);
5073}
5074
5075
5076/** Opcode 0x2c. */
5077FNIEMOP_DEF(iemOp_sub_Al_Ib)
5078{
5079 IEMOP_MNEMONIC("sub al,Ib");
5080 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sub);
5081}
5082
5083
5084/** Opcode 0x2d. */
5085FNIEMOP_DEF(iemOp_sub_eAX_Iz)
5086{
5087 IEMOP_MNEMONIC("sub rAX,Iz");
5088 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sub);
5089}
5090
5091
5092/** Opcode 0x2e. */
5093FNIEMOP_DEF(iemOp_seg_CS)
5094{
5095 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_CS;
5096 pIemCpu->iEffSeg = X86_SREG_CS;
5097
5098 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5099 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5100}
5101
5102
5103/** Opcode 0x2f. */
5104FNIEMOP_STUB(iemOp_das);
5105
5106
5107/** Opcode 0x30. */
5108FNIEMOP_DEF(iemOp_xor_Eb_Gb)
5109{
5110 IEMOP_MNEMONIC("xor Eb,Gb");
5111 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
5112 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_xor);
5113}
5114
5115
5116/** Opcode 0x31. */
5117FNIEMOP_DEF(iemOp_xor_Ev_Gv)
5118{
5119 IEMOP_MNEMONIC("xor Ev,Gv");
5120 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
5121 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_xor);
5122}
5123
5124
5125/** Opcode 0x32. */
5126FNIEMOP_DEF(iemOp_xor_Gb_Eb)
5127{
5128 IEMOP_MNEMONIC("xor Gb,Eb");
5129 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
5130 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_xor);
5131}
5132
5133
5134/** Opcode 0x33. */
5135FNIEMOP_DEF(iemOp_xor_Gv_Ev)
5136{
5137 IEMOP_MNEMONIC("xor Gv,Ev");
5138 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
5139 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_xor);
5140}
5141
5142
5143/** Opcode 0x34. */
5144FNIEMOP_DEF(iemOp_xor_Al_Ib)
5145{
5146 IEMOP_MNEMONIC("xor al,Ib");
5147 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
5148 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_xor);
5149}
5150
5151
5152/** Opcode 0x35. */
5153FNIEMOP_DEF(iemOp_xor_eAX_Iz)
5154{
5155 IEMOP_MNEMONIC("xor rAX,Iz");
5156 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
5157 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_xor);
5158}
5159
5160
5161/** Opcode 0x36. */
5162FNIEMOP_DEF(iemOp_seg_SS)
5163{
5164 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_SS;
5165 pIemCpu->iEffSeg = X86_SREG_SS;
5166
5167 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5168 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5169}
5170
5171
5172/** Opcode 0x37. */
5173FNIEMOP_STUB(iemOp_aaa);
5174
5175
5176/** Opcode 0x38. */
5177FNIEMOP_DEF(iemOp_cmp_Eb_Gb)
5178{
5179 IEMOP_MNEMONIC("cmp Eb,Gb");
5180 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
5181 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_cmp);
5182}
5183
5184
5185/** Opcode 0x39. */
5186FNIEMOP_DEF(iemOp_cmp_Ev_Gv)
5187{
5188 IEMOP_MNEMONIC("cmp Ev,Gv");
5189 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
5190 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_cmp);
5191}
5192
5193
5194/** Opcode 0x3a. */
5195FNIEMOP_DEF(iemOp_cmp_Gb_Eb)
5196{
5197 IEMOP_MNEMONIC("cmp Gb,Eb");
5198 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_cmp);
5199}
5200
5201
5202/** Opcode 0x3b. */
5203FNIEMOP_DEF(iemOp_cmp_Gv_Ev)
5204{
5205 IEMOP_MNEMONIC("cmp Gv,Ev");
5206 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_cmp);
5207}
5208
5209
5210/** Opcode 0x3c. */
5211FNIEMOP_DEF(iemOp_cmp_Al_Ib)
5212{
5213 IEMOP_MNEMONIC("cmp al,Ib");
5214 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_cmp);
5215}
5216
5217
5218/** Opcode 0x3d. */
5219FNIEMOP_DEF(iemOp_cmp_eAX_Iz)
5220{
5221 IEMOP_MNEMONIC("cmp rAX,Iz");
5222 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_cmp);
5223}
5224
5225
5226/** Opcode 0x3e. */
5227FNIEMOP_DEF(iemOp_seg_DS)
5228{
5229 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_DS;
5230 pIemCpu->iEffSeg = X86_SREG_DS;
5231
5232 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5233 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5234}
5235
5236
5237/** Opcode 0x3f. */
5238FNIEMOP_STUB(iemOp_aas);
5239
5240/**
5241 * Common 'inc/dec/not/neg register' helper.
5242 */
5243FNIEMOP_DEF_2(iemOpCommonUnaryGReg, PCIEMOPUNARYSIZES, pImpl, uint8_t, iReg)
5244{
5245 IEMOP_HLP_NO_LOCK_PREFIX();
5246 switch (pIemCpu->enmEffOpSize)
5247 {
5248 case IEMMODE_16BIT:
5249 IEM_MC_BEGIN(2, 0);
5250 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5251 IEM_MC_ARG(uint32_t *, pEFlags, 1);
5252 IEM_MC_REF_GREG_U16(pu16Dst, iReg);
5253 IEM_MC_REF_EFLAGS(pEFlags);
5254 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
5255 IEM_MC_ADVANCE_RIP();
5256 IEM_MC_END();
5257 return VINF_SUCCESS;
5258
5259 case IEMMODE_32BIT:
5260 IEM_MC_BEGIN(2, 0);
5261 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5262 IEM_MC_ARG(uint32_t *, pEFlags, 1);
5263 IEM_MC_REF_GREG_U32(pu32Dst, iReg);
5264 IEM_MC_REF_EFLAGS(pEFlags);
5265 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
5266 IEM_MC_ADVANCE_RIP();
5267 IEM_MC_END();
5268 return VINF_SUCCESS;
5269
5270 case IEMMODE_64BIT:
5271 IEM_MC_BEGIN(2, 0);
5272 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5273 IEM_MC_ARG(uint32_t *, pEFlags, 1);
5274 IEM_MC_REF_GREG_U64(pu64Dst, iReg);
5275 IEM_MC_REF_EFLAGS(pEFlags);
5276 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
5277 IEM_MC_ADVANCE_RIP();
5278 IEM_MC_END();
5279 return VINF_SUCCESS;
5280 }
5281 return VINF_SUCCESS;
5282}
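

/*
 * Note: inc/dec update OF, SF, ZF, AF and PF but leave CF untouched, hence
 * the dedicated inc/dec workers rather than reusing the add/sub ones.
 */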
5283
5284
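/*
 * Opcodes 0x40..0x4f decode as inc/dec in 16/32-bit mode but as REX
 * prefixes in 64-bit mode; the low opcode nibble maps straight onto the
 * REX bits: bit 0 = REX.B, bit 1 = REX.X, bit 2 = REX.R, bit 3 = REX.W.
 * So 0x41 sets uRexB, 0x44 sets uRexReg, and 0x48 additionally forces a
 * recalculation of the effective operand size.
 */
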
5285/** Opcode 0x40. */
5286FNIEMOP_DEF(iemOp_inc_eAX)
5287{
5288 /*
5289 * This is a REX prefix in 64-bit mode.
5290 */
5291 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5292 {
5293 pIemCpu->fPrefixes |= IEM_OP_PRF_REX;
5294
5295 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5296 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5297 }
5298
5299 IEMOP_MNEMONIC("inc eAX");
5300 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xAX);
5301}
5302
5303
5304/** Opcode 0x41. */
5305FNIEMOP_DEF(iemOp_inc_eCX)
5306{
5307 /*
5308 * This is a REX prefix in 64-bit mode.
5309 */
5310 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5311 {
5312 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B;
5313 pIemCpu->uRexB = 1 << 3;
5314
5315 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5316 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5317 }
5318
5319 IEMOP_MNEMONIC("inc eCX");
5320 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xCX);
5321}
5322
5323
5324/** Opcode 0x42. */
5325FNIEMOP_DEF(iemOp_inc_eDX)
5326{
5327 /*
5328 * This is a REX prefix in 64-bit mode.
5329 */
5330 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5331 {
5332 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X;
5333 pIemCpu->uRexIndex = 1 << 3;
5334
5335 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5336 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5337 }
5338
5339 IEMOP_MNEMONIC("inc eDX");
5340 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDX);
5341}
5342
5343
5344
5345/** Opcode 0x43. */
5346FNIEMOP_DEF(iemOp_inc_eBX)
5347{
5348 /*
5349 * This is a REX prefix in 64-bit mode.
5350 */
5351 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5352 {
5353 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
5354 pIemCpu->uRexB = 1 << 3;
5355 pIemCpu->uRexIndex = 1 << 3;
5356
5357 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5358 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5359 }
5360
5361 IEMOP_MNEMONIC("inc eBX");
5362 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBX);
5363}
5364
5365
5366/** Opcode 0x44. */
5367FNIEMOP_DEF(iemOp_inc_eSP)
5368{
5369 /*
5370 * This is a REX prefix in 64-bit mode.
5371 */
5372 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5373 {
5374 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R;
5375 pIemCpu->uRexReg = 1 << 3;
5376
5377 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5378 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5379 }
5380
5381 IEMOP_MNEMONIC("inc eSP");
5382 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSP);
5383}
5384
5385
5386/** Opcode 0x45. */
5387FNIEMOP_DEF(iemOp_inc_eBP)
5388{
5389 /*
5390 * This is a REX prefix in 64-bit mode.
5391 */
5392 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5393 {
5394 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B;
5395 pIemCpu->uRexReg = 1 << 3;
5396 pIemCpu->uRexB = 1 << 3;
5397
5398 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5399 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5400 }
5401
5402 IEMOP_MNEMONIC("inc eBP");
5403 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBP);
5404}
5405
5406
5407/** Opcode 0x46. */
5408FNIEMOP_DEF(iemOp_inc_eSI)
5409{
5410 /*
5411 * This is a REX prefix in 64-bit mode.
5412 */
5413 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5414 {
5415 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X;
5416 pIemCpu->uRexReg = 1 << 3;
5417 pIemCpu->uRexIndex = 1 << 3;
5418
5419 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5420 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5421 }
5422
5423 IEMOP_MNEMONIC("inc eSI");
5424 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSI);
5425}
5426
5427
5428/** Opcode 0x47. */
5429FNIEMOP_DEF(iemOp_inc_eDI)
5430{
5431 /*
5432 * This is a REX prefix in 64-bit mode.
5433 */
5434 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5435 {
5436 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
5437 pIemCpu->uRexReg = 1 << 3;
5438 pIemCpu->uRexB = 1 << 3;
5439 pIemCpu->uRexIndex = 1 << 3;
5440
5441 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5442 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5443 }
5444
5445 IEMOP_MNEMONIC("inc eDI");
5446 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDI);
5447}
5448
5449
5450/** Opcode 0x48. */
5451FNIEMOP_DEF(iemOp_dec_eAX)
5452{
5453 /*
5454 * This is a REX prefix in 64-bit mode.
5455 */
5456 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5457 {
5458 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_SIZE_REX_W;
5459 iemRecalEffOpSize(pIemCpu);
5460
5461 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5462 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5463 }
5464
5465 IEMOP_MNEMONIC("dec eAX");
5466 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xAX);
5467}
5468
5469
5470/** Opcode 0x49. */
5471FNIEMOP_DEF(iemOp_dec_eCX)
5472{
5473 /*
5474 * This is a REX prefix in 64-bit mode.
5475 */
5476 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5477 {
5478 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
5479 pIemCpu->uRexB = 1 << 3;
5480 iemRecalEffOpSize(pIemCpu);
5481
5482 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5483 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5484 }
5485
5486 IEMOP_MNEMONIC("dec eCX");
5487 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xCX);
5488}
5489
5490
5491/** Opcode 0x4a. */
5492FNIEMOP_DEF(iemOp_dec_eDX)
5493{
5494 /*
5495 * This is a REX prefix in 64-bit mode.
5496 */
5497 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5498 {
5499 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
5500 pIemCpu->uRexIndex = 1 << 3;
5501 iemRecalEffOpSize(pIemCpu);
5502
5503 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5504 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5505 }
5506
5507 IEMOP_MNEMONIC("dec eDX");
5508 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDX);
5509}
5510
5511
5512/** Opcode 0x4b. */
5513FNIEMOP_DEF(iemOp_dec_eBX)
5514{
5515 /*
5516 * This is a REX prefix in 64-bit mode.
5517 */
5518 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5519 {
5520 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
5521 pIemCpu->uRexB = 1 << 3;
5522 pIemCpu->uRexIndex = 1 << 3;
5523 iemRecalEffOpSize(pIemCpu);
5524
5525 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5526 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5527 }
5528
5529 IEMOP_MNEMONIC("dec eBX");
5530 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBX);
5531}
5532
5533
5534/** Opcode 0x4c. */
5535FNIEMOP_DEF(iemOp_dec_eSP)
5536{
5537 /*
5538 * This is a REX prefix in 64-bit mode.
5539 */
5540 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5541 {
5542 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_SIZE_REX_W;
5543 pIemCpu->uRexReg = 1 << 3;
5544 iemRecalEffOpSize(pIemCpu);
5545
5546 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5547 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5548 }
5549
5550 IEMOP_MNEMONIC("dec eSP");
5551 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSP);
5552}
5553
5554
5555/** Opcode 0x4d. */
5556FNIEMOP_DEF(iemOp_dec_eBP)
5557{
5558 /*
5559 * This is a REX prefix in 64-bit mode.
5560 */
5561 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5562 {
5563 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
5564 pIemCpu->uRexReg = 1 << 3;
5565 pIemCpu->uRexB = 1 << 3;
5566 iemRecalEffOpSize(pIemCpu);
5567
5568 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5569 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5570 }
5571
5572 IEMOP_MNEMONIC("dec eBP");
5573 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBP);
5574}
5575
5576
5577/** Opcode 0x4e. */
5578FNIEMOP_DEF(iemOp_dec_eSI)
5579{
5580 /*
5581 * This is a REX prefix in 64-bit mode.
5582 */
5583 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5584 {
5585 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
5586 pIemCpu->uRexReg = 1 << 3;
5587 pIemCpu->uRexIndex = 1 << 3;
5588 iemRecalEffOpSize(pIemCpu);
5589
5590 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5591 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5592 }
5593
5594 IEMOP_MNEMONIC("dec eSI");
5595 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSI);
5596}
5597
5598
5599/** Opcode 0x4f. */
5600FNIEMOP_DEF(iemOp_dec_eDI)
5601{
5602 /*
5603 * This is a REX prefix in 64-bit mode.
5604 */
5605 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5606 {
5607 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
5608 pIemCpu->uRexReg = 1 << 3;
5609 pIemCpu->uRexB = 1 << 3;
5610 pIemCpu->uRexIndex = 1 << 3;
5611 iemRecalEffOpSize(pIemCpu);
5612
5613 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5614 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5615 }
5616
5617 IEMOP_MNEMONIC("dec eDI");
5618 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDI);
5619}
5620
5621
5622/**
5623 * Common 'push register' helper.
5624 */
5625FNIEMOP_DEF_1(iemOpCommonPushGReg, uint8_t, iReg)
5626{
5627 IEMOP_HLP_NO_LOCK_PREFIX();
5628 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5629 {
5630 iReg |= pIemCpu->uRexB;
5631 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
5632 pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
5633 }
5634
5635 switch (pIemCpu->enmEffOpSize)
5636 {
5637 case IEMMODE_16BIT:
5638 IEM_MC_BEGIN(0, 1);
5639 IEM_MC_LOCAL(uint16_t, u16Value);
5640 IEM_MC_FETCH_GREG_U16(u16Value, iReg);
5641 IEM_MC_PUSH_U16(u16Value);
5642 IEM_MC_ADVANCE_RIP();
5643 IEM_MC_END();
5644 break;
5645
5646 case IEMMODE_32BIT:
5647 IEM_MC_BEGIN(0, 1);
5648 IEM_MC_LOCAL(uint32_t, u32Value);
5649 IEM_MC_FETCH_GREG_U32(u32Value, iReg);
5650 IEM_MC_PUSH_U32(u32Value);
5651 IEM_MC_ADVANCE_RIP();
5652 IEM_MC_END();
5653 break;
5654
5655 case IEMMODE_64BIT:
5656 IEM_MC_BEGIN(0, 1);
5657 IEM_MC_LOCAL(uint64_t, u64Value);
5658 IEM_MC_FETCH_GREG_U64(u64Value, iReg);
5659 IEM_MC_PUSH_U64(u64Value);
5660 IEM_MC_ADVANCE_RIP();
5661 IEM_MC_END();
5662 break;
5663 }
5664
5665 return VINF_SUCCESS;
5666}
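/*
 * Illustrative note (not from the original source): in 64-bit mode PUSH
 * defaults to a 64-bit operand and has no 32-bit form; only the 0x66 prefix
 * can shrink it to 16 bits, which is exactly what the IEMMODE_64BIT branch
 * above encodes:
 *
 *     50       push rax    ; 64-bit push, no prefix needed
 *     66 50    push ax     ; 16-bit push via the operand size prefix
 *     ; there is no way to encode "push eax" in 64-bit mode
 */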
5667
5668
5669/** Opcode 0x50. */
5670FNIEMOP_DEF(iemOp_push_eAX)
5671{
5672 IEMOP_MNEMONIC("push rAX");
5673 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xAX);
5674}
5675
5676
5677/** Opcode 0x51. */
5678FNIEMOP_DEF(iemOp_push_eCX)
5679{
5680 IEMOP_MNEMONIC("push rCX");
5681 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xCX);
5682}
5683
5684
5685/** Opcode 0x52. */
5686FNIEMOP_DEF(iemOp_push_eDX)
5687{
5688 IEMOP_MNEMONIC("push rDX");
5689 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDX);
5690}
5691
5692
5693/** Opcode 0x53. */
5694FNIEMOP_DEF(iemOp_push_eBX)
5695{
5696 IEMOP_MNEMONIC("push rBX");
5697 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBX);
5698}
5699
5700
5701/** Opcode 0x54. */
5702FNIEMOP_DEF(iemOp_push_eSP)
5703{
5704 IEMOP_MNEMONIC("push rSP");
5705 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSP);
5706}
5707
5708
5709/** Opcode 0x55. */
5710FNIEMOP_DEF(iemOp_push_eBP)
5711{
5712 IEMOP_MNEMONIC("push rBP");
5713 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBP);
5714}
5715
5716
5717/** Opcode 0x56. */
5718FNIEMOP_DEF(iemOp_push_eSI)
5719{
5720 IEMOP_MNEMONIC("push rSI");
5721 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSI);
5722}
5723
5724
5725/** Opcode 0x57. */
5726FNIEMOP_DEF(iemOp_push_eDI)
5727{
5728 IEMOP_MNEMONIC("push rDI");
5729 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDI);
5730}
5731
5732
5733/**
5734 * Common 'pop register' helper.
5735 */
5736FNIEMOP_DEF_1(iemOpCommonPopGReg, uint8_t, iReg)
5737{
5738 IEMOP_HLP_NO_LOCK_PREFIX();
5739 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5740 {
5741 iReg |= pIemCpu->uRexB;
5742 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
5743 pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
5744 }
5745
5746/** @todo How does this code handle iReg==X86_GREG_xSP? And how does a real
5747 * CPU handle it, for that matter? (The Intel pseudo code hints that the
5748 * popped value is incremented by the stack item size.) Test it, both
5749 * encodings and all three register sizes. */
5750 switch (pIemCpu->enmEffOpSize)
5751 {
5752 case IEMMODE_16BIT:
5753 IEM_MC_BEGIN(0, 1);
5754            IEM_MC_LOCAL(uint16_t *, pu16Dst);
5755 IEM_MC_REF_GREG_U16(pu16Dst, iReg);
5756 IEM_MC_POP_U16(pu16Dst);
5757 IEM_MC_ADVANCE_RIP();
5758 IEM_MC_END();
5759 break;
5760
5761 case IEMMODE_32BIT:
5762 IEM_MC_BEGIN(0, 1);
5763            IEM_MC_LOCAL(uint32_t *, pu32Dst);
5764 IEM_MC_REF_GREG_U32(pu32Dst, iReg);
5765 IEM_MC_POP_U32(pu32Dst);
5766 IEM_MC_ADVANCE_RIP();
5767 IEM_MC_END();
5768 break;
5769
5770 case IEMMODE_64BIT:
5771 IEM_MC_BEGIN(0, 1);
5772            IEM_MC_LOCAL(uint64_t *, pu64Dst);
5773 IEM_MC_REF_GREG_U64(pu64Dst, iReg);
5774 IEM_MC_POP_U64(pu64Dst);
5775 IEM_MC_ADVANCE_RIP();
5776 IEM_MC_END();
5777 break;
5778 }
5779
5780 return VINF_SUCCESS;
5781}
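/*
 * Reference sketch for the @todo above (an assumption based on one reading
 * of the Intel pseudo code, not verified against hardware): for "pop rSP"
 * the stack pointer is incremented before the popped value is written to it,
 * so the increment is simply overwritten:
 *
 *     // Hypothetical model, for illustration only.
 *     static void popRspModel(uint64_t *pRsp, uint64_t const *pStackTop)
 *     {
 *         uint64_t const uValue = *pStackTop; // value at the old top of stack
 *         *pRsp += 8;                         // RSP bumped first ...
 *         *pRsp  = uValue;                    // ... then replaced by the value
 *     }
 *
 * Writing straight through the register reference, as the code above does,
 * would give the same end result - which is what the todo asks to confirm.
 */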
5782
5783
5784/** Opcode 0x58. */
5785FNIEMOP_DEF(iemOp_pop_eAX)
5786{
5787 IEMOP_MNEMONIC("pop rAX");
5788 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xAX);
5789}
5790
5791
5792/** Opcode 0x59. */
5793FNIEMOP_DEF(iemOp_pop_eCX)
5794{
5795 IEMOP_MNEMONIC("pop rCX");
5796 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xCX);
5797}
5798
5799
5800/** Opcode 0x5a. */
5801FNIEMOP_DEF(iemOp_pop_eDX)
5802{
5803 IEMOP_MNEMONIC("pop rDX");
5804 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDX);
5805}
5806
5807
5808/** Opcode 0x5b. */
5809FNIEMOP_DEF(iemOp_pop_eBX)
5810{
5811 IEMOP_MNEMONIC("pop rBX");
5812 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBX);
5813}
5814
5815
5816/** Opcode 0x5c. */
5817FNIEMOP_DEF(iemOp_pop_eSP)
5818{
5819 IEMOP_MNEMONIC("pop rSP");
5820 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSP);
5821}
5822
5823
5824/** Opcode 0x5d. */
5825FNIEMOP_DEF(iemOp_pop_eBP)
5826{
5827 IEMOP_MNEMONIC("pop rBP");
5828 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBP);
5829}
5830
5831
5832/** Opcode 0x5e. */
5833FNIEMOP_DEF(iemOp_pop_eSI)
5834{
5835 IEMOP_MNEMONIC("pop rSI");
5836 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSI);
5837}
5838
5839
5840/** Opcode 0x5f. */
5841FNIEMOP_DEF(iemOp_pop_eDI)
5842{
5843 IEMOP_MNEMONIC("pop rDI");
5844 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDI);
5845}
5846
5847
5848/** Opcode 0x60. */
5849FNIEMOP_DEF(iemOp_pusha)
5850{
5851 IEMOP_MNEMONIC("pusha");
5852 IEMOP_HLP_NO_64BIT();
5853 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
5854 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_16);
5855 Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
5856 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_32);
5857}
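/*
 * Conceptual sketch (illustrative, not the emulator's code): PUSHA pushes
 * the eight general registers in the fixed order AX, CX, DX, BX, original
 * SP, BP, SI, DI, so the decoder only has to select on operand size:
 *
 *     // Hypothetical model of the 16-bit variant, for illustration only.
 *     // pau16Stack is a flat, zero-based view of the stack segment.
 *     static void pusha16Model(uint16_t const auGRegs[8], uint16_t *puSp,
 *                              uint16_t *pau16Stack)
 *     {
 *         uint16_t const uOldSp = *puSp;  // the SP value that gets pushed
 *         for (unsigned i = 0; i < 8; i++)
 *         {
 *             *puSp -= 2;
 *             pau16Stack[*puSp / 2] = i == 4 ? uOldSp : auGRegs[i]; // slot 4 = SP
 *         }
 *     }
 */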
5858
5859
5860/** Opcode 0x61. */
5861FNIEMOP_DEF(iemOp_popa)
5862{
5863 IEMOP_MNEMONIC("popa");
5864 IEMOP_HLP_NO_64BIT();
5865 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
5866 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_16);
5867 Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
5868 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_32);
5869}
5870
5871
5872/** Opcode 0x62. */
5873FNIEMOP_STUB(iemOp_bound_Gv_Ma);
5874/** Opcode 0x63. */
5875FNIEMOP_STUB(iemOp_arpl_Ew_Gw);
5876
5877
5878/** Opcode 0x64. */
5879FNIEMOP_DEF(iemOp_seg_FS)
5880{
5881 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_FS;
5882 pIemCpu->iEffSeg = X86_SREG_FS;
5883
5884 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5885 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5886}
5887
5888
5889/** Opcode 0x65. */
5890FNIEMOP_DEF(iemOp_seg_GS)
5891{
5892 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_GS;
5893 pIemCpu->iEffSeg = X86_SREG_GS;
5894
5895 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5896 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5897}
5898
5899
5900/** Opcode 0x66. */
5901FNIEMOP_DEF(iemOp_op_size)
5902{
5903 pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_OP;
5904 iemRecalEffOpSize(pIemCpu);
5905
5906 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5907 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5908}
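/*
 * Illustrative note (not from the original source): the effective size
 * recalculation has to consider REX.W as well as this prefix, since
 * architecturally REX.W takes precedence over 0x66 when both are present:
 *
 *     66 50          push ax     ; 0x66 alone -> 16-bit operand
 *     66 48 ff c0    inc rax     ; REX.W present -> 64-bit, 0x66 ignored
 */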
5909
5910
5911/** Opcode 0x67. */
5912FNIEMOP_DEF(iemOp_addr_size)
5913{
5914 pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_ADDR;
5915 switch (pIemCpu->enmDefAddrMode)
5916 {
5917 case IEMMODE_16BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
5918 case IEMMODE_32BIT: pIemCpu->enmEffAddrMode = IEMMODE_16BIT; break;
5919 case IEMMODE_64BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
5920 default: AssertFailed();
5921 }
5922
5923 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5924 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5925}
5926
5927
5928/** Opcode 0x68. */
5929FNIEMOP_DEF(iemOp_push_Iz)
5930{
5931 IEMOP_MNEMONIC("push Iz");
5932 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
5933 switch (pIemCpu->enmEffOpSize)
5934 {
5935 case IEMMODE_16BIT:
5936 {
5937 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
5938 IEMOP_HLP_NO_LOCK_PREFIX();
5939 IEM_MC_BEGIN(0,0);
5940 IEM_MC_PUSH_U16(u16Imm);
5941 IEM_MC_ADVANCE_RIP();
5942 IEM_MC_END();
5943 return VINF_SUCCESS;
5944 }
5945
5946 case IEMMODE_32BIT:
5947 {
5948 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
5949 IEMOP_HLP_NO_LOCK_PREFIX();
5950 IEM_MC_BEGIN(0,0);
5951 IEM_MC_PUSH_U32(u32Imm);
5952 IEM_MC_ADVANCE_RIP();
5953 IEM_MC_END();
5954 return VINF_SUCCESS;
5955 }
5956
5957 case IEMMODE_64BIT:
5958 {
5959 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
5960 IEMOP_HLP_NO_LOCK_PREFIX();
5961 IEM_MC_BEGIN(0,0);
5962 IEM_MC_PUSH_U64(u64Imm);
5963 IEM_MC_ADVANCE_RIP();
5964 IEM_MC_END();
5965 return VINF_SUCCESS;
5966 }
5967
5968 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5969 }
5970}
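/*
 * Illustrative note (not from the original source): "Iz" denotes an
 * immediate of the operand size, capped at 32 bits.  That is why the 64-bit
 * case above fetches a sign-extended 32-bit immediate rather than a full
 * 64-bit one:
 *
 *     68 78 56 34 12    push 0x12345678    ; 32-bit imm, also the 64-bit form
 *     66 68 34 12       push 0x1234        ; 16-bit operand size
 */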
5971
5972
5973/** Opcode 0x69. */
5974FNIEMOP_DEF(iemOp_imul_Gv_Ev_Iz)
5975{
5976 IEMOP_MNEMONIC("imul Gv,Ev,Iz"); /* Gv = Ev * Iz; */
5977 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
5978 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
5979
5980 switch (pIemCpu->enmEffOpSize)
5981 {
5982 case IEMMODE_16BIT:
5983 {
5984 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
5985 IEMOP_HLP_NO_LOCK_PREFIX();
5986 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5987 {
5988 /* register operand */
5989 IEM_MC_BEGIN(3, 1);
5990 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5991 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm,1);
5992 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5993 IEM_MC_LOCAL(uint16_t, u16Tmp);
5994
5995 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5996 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
5997 IEM_MC_REF_EFLAGS(pEFlags);
5998 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
5999 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
6000
6001 IEM_MC_ADVANCE_RIP();
6002 IEM_MC_END();
6003 }
6004 else
6005 {
6006 /* memory operand */
6007 IEM_MC_BEGIN(3, 2);
6008 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6009 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm,1);
6010 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6011 IEM_MC_LOCAL(uint16_t, u16Tmp);
6012 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6013
6014 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6015 IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
6016 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
6017 IEM_MC_REF_EFLAGS(pEFlags);
6018 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
6019 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
6020
6021 IEM_MC_ADVANCE_RIP();
6022 IEM_MC_END();
6023 }
6024 return VINF_SUCCESS;
6025 }
6026
6027 case IEMMODE_32BIT:
6028 {
6029 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
6030 IEMOP_HLP_NO_LOCK_PREFIX();
6031 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6032 {
6033 /* register operand */
6034 IEM_MC_BEGIN(3, 1);
6035 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6036 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm,1);
6037 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6038 IEM_MC_LOCAL(uint32_t, u32Tmp);
6039
6040 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6041 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
6042 IEM_MC_REF_EFLAGS(pEFlags);
6043 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
6044 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
6045
6046 IEM_MC_ADVANCE_RIP();
6047 IEM_MC_END();
6048 }
6049 else
6050 {
6051 /* memory operand */
6052 IEM_MC_BEGIN(3, 2);
6053 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6054 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm,1);
6055 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6056 IEM_MC_LOCAL(uint32_t, u32Tmp);
6057 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6058
6059 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6060 IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
6061 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
6062 IEM_MC_REF_EFLAGS(pEFlags);
6063 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
6064 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
6065
6066 IEM_MC_ADVANCE_RIP();
6067 IEM_MC_END();
6068 }
6069 return VINF_SUCCESS;
6070 }
6071
6072 case IEMMODE_64BIT:
6073 {
6074 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
6075 IEMOP_HLP_NO_LOCK_PREFIX();
6076 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6077 {
6078 /* register operand */
6079 IEM_MC_BEGIN(3, 1);
6080 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6081 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm,1);
6082 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6083 IEM_MC_LOCAL(uint64_t, u64Tmp);
6084
6085 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6086 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
6087 IEM_MC_REF_EFLAGS(pEFlags);
6088 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
6089 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
6090
6091 IEM_MC_ADVANCE_RIP();
6092 IEM_MC_END();
6093 }
6094 else
6095 {
6096 /* memory operand */
6097 IEM_MC_BEGIN(3, 2);
6098 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6099 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm,1);
6100 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6101 IEM_MC_LOCAL(uint64_t, u64Tmp);
6102 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6103
6104 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6105 IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
6106 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
6107 IEM_MC_REF_EFLAGS(pEFlags);
6108 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
6109 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
6110
6111 IEM_MC_ADVANCE_RIP();
6112 IEM_MC_END();
6113 }
6114 return VINF_SUCCESS;
6115 }
6116 }
6117 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
6118}
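/*
 * Illustrative sketch (an assumption about the iemAImpl_imul_two_* contract,
 * based on the architectural definition of two-operand IMUL): CF and OF are
 * set when the signed result does not fit the destination width:
 *
 *     // Hypothetical model, for illustration only.
 *     static bool imul16Overflows(int16_t i16Dst, int16_t i16Src)
 *     {
 *         int32_t const i32Full = (int32_t)i16Dst * i16Src;
 *         return i32Full != (int16_t)i32Full; // true => CF=OF=1
 *     }
 *
 * SF, ZF, AF and PF are architecturally undefined after IMUL, which is what
 * the IEMOP_VERIFICATION_UNDEFINED_EFLAGS statement above declares.
 */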
6119
6120
6121/** Opcode 0x6a. */
6122FNIEMOP_DEF(iemOp_push_Ib)
6123{
6124 IEMOP_MNEMONIC("push Ib");
6125 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6126 IEMOP_HLP_NO_LOCK_PREFIX();
6127 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6128
6129 IEM_MC_BEGIN(0,0);
6130 switch (pIemCpu->enmEffOpSize)
6131 {
6132 case IEMMODE_16BIT:
6133 IEM_MC_PUSH_U16(i8Imm);
6134 break;
6135 case IEMMODE_32BIT:
6136 IEM_MC_PUSH_U32(i8Imm);
6137 break;
6138 case IEMMODE_64BIT:
6139 IEM_MC_PUSH_U64(i8Imm);
6140 break;
6141 }
6142 IEM_MC_ADVANCE_RIP();
6143 IEM_MC_END();
6144 return VINF_SUCCESS;
6145}
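/*
 * Worked example (illustrative, not from the original source): the Ib
 * immediate was fetched as int8_t above, so the wider IEM_MC_PUSH_U16/U32/U64
 * statements pick up the sign extension from the normal C conversions:
 *
 *     int8_t   i8Imm = -1;               // encoded as 6a ff
 *     uint16_t u16   = (uint16_t)i8Imm;  // 0xffff
 *     uint64_t u64   = (uint64_t)i8Imm;  // 0xffffffffffffffff
 */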
6146
6147
6148/** Opcode 0x6b. */
6149FNIEMOP_DEF(iemOp_imul_Gv_Ev_Ib)
6150{
6151    IEMOP_MNEMONIC("imul Gv,Ev,Ib"); /* Gv = Ev * Ib; */
6152 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6153 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6154 IEMOP_HLP_NO_LOCK_PREFIX();
6155 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
6156
6157 switch (pIemCpu->enmEffOpSize)
6158 {
6159 case IEMMODE_16BIT:
6160 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6161 {
6162 /* register operand */
6163 IEM_MC_BEGIN(3, 1);
6164 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6165 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ (int8_t)u8Imm, 1);
6166 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6167 IEM_MC_LOCAL(uint16_t, u16Tmp);
6168
6169 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6170 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
6171 IEM_MC_REF_EFLAGS(pEFlags);
6172 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
6173 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
6174
6175 IEM_MC_ADVANCE_RIP();
6176 IEM_MC_END();
6177 }
6178 else
6179 {
6180 /* memory operand */
6181 IEM_MC_BEGIN(3, 2);
6182 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6183 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ (int8_t)u8Imm, 1);
6184 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6185 IEM_MC_LOCAL(uint16_t, u16Tmp);
6186 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6187
6188 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6189 IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
6190 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
6191 IEM_MC_REF_EFLAGS(pEFlags);
6192 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
6193 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
6194
6195 IEM_MC_ADVANCE_RIP();
6196 IEM_MC_END();
6197 }
6198 return VINF_SUCCESS;
6199
6200 case IEMMODE_32BIT:
6201 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6202 {
6203 /* register operand */
6204 IEM_MC_BEGIN(3, 1);
6205 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6206 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ (int8_t)u8Imm, 1);
6207 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6208 IEM_MC_LOCAL(uint32_t, u32Tmp);
6209
6210 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6211 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
6212 IEM_MC_REF_EFLAGS(pEFlags);
6213 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
6214 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
6215
6216 IEM_MC_ADVANCE_RIP();
6217 IEM_MC_END();
6218 }
6219 else
6220 {
6221 /* memory operand */
6222 IEM_MC_BEGIN(3, 2);
6223 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6224 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ (int8_t)u8Imm, 1);
6225 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6226 IEM_MC_LOCAL(uint32_t, u32Tmp);
6227 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6228
6229 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6230 IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
6231 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
6232 IEM_MC_REF_EFLAGS(pEFlags);
6233 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
6234 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
6235
6236 IEM_MC_ADVANCE_RIP();
6237 IEM_MC_END();
6238 }
6239 return VINF_SUCCESS;
6240
6241 case IEMMODE_64BIT:
6242 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6243 {
6244 /* register operand */
6245 IEM_MC_BEGIN(3, 1);
6246 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6247 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ (int8_t)u8Imm, 1);
6248 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6249 IEM_MC_LOCAL(uint64_t, u64Tmp);
6250
6251 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6252 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
6253 IEM_MC_REF_EFLAGS(pEFlags);
6254 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
6255 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
6256
6257 IEM_MC_ADVANCE_RIP();
6258 IEM_MC_END();
6259 }
6260 else
6261 {
6262 /* memory operand */
6263 IEM_MC_BEGIN(3, 2);
6264 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6265 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ (int8_t)u8Imm, 1);
6266 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6267 IEM_MC_LOCAL(uint64_t, u64Tmp);
6268 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6269
6270 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6271 IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
6272 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
6273 IEM_MC_REF_EFLAGS(pEFlags);
6274 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
6275 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
6276
6277 IEM_MC_ADVANCE_RIP();
6278 IEM_MC_END();
6279 }
6280 return VINF_SUCCESS;
6281 }
6282 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
6283}
6284
6285
6286/** Opcode 0x6c. */
6287FNIEMOP_DEF(iemOp_insb_Yb_DX)
6288{
6289 IEMOP_HLP_NO_LOCK_PREFIX();
6290 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
6291 {
6292 IEMOP_MNEMONIC("rep ins Yb,DX");
6293 switch (pIemCpu->enmEffAddrMode)
6294 {
6295 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr16);
6296 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr32);
6297 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr64);
6298 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6299 }
6300 }
6301 else
6302 {
6303 IEMOP_MNEMONIC("ins Yb,DX");
6304 switch (pIemCpu->enmEffAddrMode)
6305 {
6306 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr16);
6307 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr32);
6308 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr64);
6309 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6310 }
6311 }
6312}
6313
6314
6315/** Opcode 0x6d. */
6316FNIEMOP_DEF(iemOp_inswd_Yv_DX)
6317{
6318 IEMOP_HLP_NO_LOCK_PREFIX();
6319 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
6320 {
6321 IEMOP_MNEMONIC("rep ins Yv,DX");
6322 switch (pIemCpu->enmEffOpSize)
6323 {
6324 case IEMMODE_16BIT:
6325 switch (pIemCpu->enmEffAddrMode)
6326 {
6327 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr16);
6328 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr32);
6329 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr64);
6330 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6331 }
6332 break;
6333 case IEMMODE_64BIT:
6334 case IEMMODE_32BIT:
6335 switch (pIemCpu->enmEffAddrMode)
6336 {
6337 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr16);
6338 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr32);
6339 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr64);
6340 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6341 }
6342 break;
6343 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6344 }
6345 }
6346 else
6347 {
6348 IEMOP_MNEMONIC("ins Yv,DX");
6349 switch (pIemCpu->enmEffOpSize)
6350 {
6351 case IEMMODE_16BIT:
6352 switch (pIemCpu->enmEffAddrMode)
6353 {
6354 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr16);
6355 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr32);
6356 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr64);
6357 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6358 }
6359 break;
6360 case IEMMODE_64BIT:
6361 case IEMMODE_32BIT:
6362 switch (pIemCpu->enmEffAddrMode)
6363 {
6364 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr16);
6365 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr32);
6366 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr64);
6367 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6368 }
6369 break;
6370 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6371 }
6372 }
6373}
6374
6375
6376/** Opcode 0x6e. */
6377FNIEMOP_DEF(iemOp_outsb_Yb_DX)
6378{
6379 IEMOP_HLP_NO_LOCK_PREFIX();
6380 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
6381 {
6382        IEMOP_MNEMONIC("rep outs DX,Yb");
6383 switch (pIemCpu->enmEffAddrMode)
6384 {
6385 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr16, pIemCpu->iEffSeg);
6386 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr32, pIemCpu->iEffSeg);
6387 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr64, pIemCpu->iEffSeg);
6388 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6389 }
6390 }
6391 else
6392 {
6393        IEMOP_MNEMONIC("outs DX,Yb");
6394 switch (pIemCpu->enmEffAddrMode)
6395 {
6396 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr16, pIemCpu->iEffSeg);
6397 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr32, pIemCpu->iEffSeg);
6398 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr64, pIemCpu->iEffSeg);
6399 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6400 }
6401 }
6402}
6403
6404
6405/** Opcode 0x6f. */
6406FNIEMOP_DEF(iemOp_outswd_Yv_DX)
6407{
6408 IEMOP_HLP_NO_LOCK_PREFIX();
6409 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
6410 {
6411 IEMOP_MNEMONIC("rep outs DX,Yv");
6412 switch (pIemCpu->enmEffOpSize)
6413 {
6414 case IEMMODE_16BIT:
6415 switch (pIemCpu->enmEffAddrMode)
6416 {
6417 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr16, pIemCpu->iEffSeg);
6418 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr32, pIemCpu->iEffSeg);
6419 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr64, pIemCpu->iEffSeg);
6420 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6421 }
6422 break;
6423 case IEMMODE_64BIT:
6424 case IEMMODE_32BIT:
6425 switch (pIemCpu->enmEffAddrMode)
6426 {
6427 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr16, pIemCpu->iEffSeg);
6428 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr32, pIemCpu->iEffSeg);
6429 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr64, pIemCpu->iEffSeg);
6430 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6431 }
6432 break;
6433 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6434 }
6435 }
6436 else
6437 {
6438 IEMOP_MNEMONIC("outs DX,Yv");
6439 switch (pIemCpu->enmEffOpSize)
6440 {
6441 case IEMMODE_16BIT:
6442 switch (pIemCpu->enmEffAddrMode)
6443 {
6444 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr16, pIemCpu->iEffSeg);
6445 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr32, pIemCpu->iEffSeg);
6446 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr64, pIemCpu->iEffSeg);
6447 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6448 }
6449 break;
6450 case IEMMODE_64BIT:
6451 case IEMMODE_32BIT:
6452 switch (pIemCpu->enmEffAddrMode)
6453 {
6454 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr16, pIemCpu->iEffSeg);
6455 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr32, pIemCpu->iEffSeg);
6456 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr64, pIemCpu->iEffSeg);
6457 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6458 }
6459 break;
6460 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6461 }
6462 }
6463}
6464
6465
6466/** Opcode 0x70. */
6467FNIEMOP_DEF(iemOp_jo_Jb)
6468{
6469 IEMOP_MNEMONIC("jo Jb");
6470 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6471 IEMOP_HLP_NO_LOCK_PREFIX();
6472 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6473
6474 IEM_MC_BEGIN(0, 0);
6475 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
6476 IEM_MC_REL_JMP_S8(i8Imm);
6477 } IEM_MC_ELSE() {
6478 IEM_MC_ADVANCE_RIP();
6479 } IEM_MC_ENDIF();
6480 IEM_MC_END();
6481 return VINF_SUCCESS;
6482}
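/*
 * Illustrative note (not from the original source): the 0x70..0x7f Jcc
 * opcodes come in complementary pairs - the even opcode jumps when its
 * condition is set, the following odd one when it is clear.  The handlers
 * below therefore mirror each other by swapping the IEM_MC_IF_EFL_* and
 * IEM_MC_ELSE bodies:
 *
 *     70 cb    jo  rel8    ; jump if OF=1
 *     71 cb    jno rel8    ; jump if OF=0
 */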
6483
6484
6485/** Opcode 0x71. */
6486FNIEMOP_DEF(iemOp_jno_Jb)
6487{
6488 IEMOP_MNEMONIC("jno Jb");
6489 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6490 IEMOP_HLP_NO_LOCK_PREFIX();
6491 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6492
6493 IEM_MC_BEGIN(0, 0);
6494 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
6495 IEM_MC_ADVANCE_RIP();
6496 } IEM_MC_ELSE() {
6497 IEM_MC_REL_JMP_S8(i8Imm);
6498 } IEM_MC_ENDIF();
6499 IEM_MC_END();
6500 return VINF_SUCCESS;
6501}
6502

6503/** Opcode 0x72. */
6504FNIEMOP_DEF(iemOp_jc_Jb)
6505{
6506 IEMOP_MNEMONIC("jc/jnae Jb");
6507 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6508 IEMOP_HLP_NO_LOCK_PREFIX();
6509 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6510
6511 IEM_MC_BEGIN(0, 0);
6512 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
6513 IEM_MC_REL_JMP_S8(i8Imm);
6514 } IEM_MC_ELSE() {
6515 IEM_MC_ADVANCE_RIP();
6516 } IEM_MC_ENDIF();
6517 IEM_MC_END();
6518 return VINF_SUCCESS;
6519}
6520
6521
6522/** Opcode 0x73. */
6523FNIEMOP_DEF(iemOp_jnc_Jb)
6524{
6525 IEMOP_MNEMONIC("jnc/jnb Jb");
6526 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6527 IEMOP_HLP_NO_LOCK_PREFIX();
6528 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6529
6530 IEM_MC_BEGIN(0, 0);
6531 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
6532 IEM_MC_ADVANCE_RIP();
6533 } IEM_MC_ELSE() {
6534 IEM_MC_REL_JMP_S8(i8Imm);
6535 } IEM_MC_ENDIF();
6536 IEM_MC_END();
6537 return VINF_SUCCESS;
6538}
6539
6540
6541/** Opcode 0x74. */
6542FNIEMOP_DEF(iemOp_je_Jb)
6543{
6544 IEMOP_MNEMONIC("je/jz Jb");
6545 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6546 IEMOP_HLP_NO_LOCK_PREFIX();
6547 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6548
6549 IEM_MC_BEGIN(0, 0);
6550 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
6551 IEM_MC_REL_JMP_S8(i8Imm);
6552 } IEM_MC_ELSE() {
6553 IEM_MC_ADVANCE_RIP();
6554 } IEM_MC_ENDIF();
6555 IEM_MC_END();
6556 return VINF_SUCCESS;
6557}
6558
6559
6560/** Opcode 0x75. */
6561FNIEMOP_DEF(iemOp_jne_Jb)
6562{
6563 IEMOP_MNEMONIC("jne/jnz Jb");
6564 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6565 IEMOP_HLP_NO_LOCK_PREFIX();
6566 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6567
6568 IEM_MC_BEGIN(0, 0);
6569 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
6570 IEM_MC_ADVANCE_RIP();
6571 } IEM_MC_ELSE() {
6572 IEM_MC_REL_JMP_S8(i8Imm);
6573 } IEM_MC_ENDIF();
6574 IEM_MC_END();
6575 return VINF_SUCCESS;
6576}
6577
6578
6579/** Opcode 0x76. */
6580FNIEMOP_DEF(iemOp_jbe_Jb)
6581{
6582 IEMOP_MNEMONIC("jbe/jna Jb");
6583 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6584 IEMOP_HLP_NO_LOCK_PREFIX();
6585 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6586
6587 IEM_MC_BEGIN(0, 0);
6588 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
6589 IEM_MC_REL_JMP_S8(i8Imm);
6590 } IEM_MC_ELSE() {
6591 IEM_MC_ADVANCE_RIP();
6592 } IEM_MC_ENDIF();
6593 IEM_MC_END();
6594 return VINF_SUCCESS;
6595}
6596
6597
6598/** Opcode 0x77. */
6599FNIEMOP_DEF(iemOp_jnbe_Jb)
6600{
6601 IEMOP_MNEMONIC("jnbe/ja Jb");
6602 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6603 IEMOP_HLP_NO_LOCK_PREFIX();
6604 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6605
6606 IEM_MC_BEGIN(0, 0);
6607 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
6608 IEM_MC_ADVANCE_RIP();
6609 } IEM_MC_ELSE() {
6610 IEM_MC_REL_JMP_S8(i8Imm);
6611 } IEM_MC_ENDIF();
6612 IEM_MC_END();
6613 return VINF_SUCCESS;
6614}
6615
6616
6617/** Opcode 0x78. */
6618FNIEMOP_DEF(iemOp_js_Jb)
6619{
6620 IEMOP_MNEMONIC("js Jb");
6621 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6622 IEMOP_HLP_NO_LOCK_PREFIX();
6623 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6624
6625 IEM_MC_BEGIN(0, 0);
6626 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
6627 IEM_MC_REL_JMP_S8(i8Imm);
6628 } IEM_MC_ELSE() {
6629 IEM_MC_ADVANCE_RIP();
6630 } IEM_MC_ENDIF();
6631 IEM_MC_END();
6632 return VINF_SUCCESS;
6633}
6634
6635
6636/** Opcode 0x79. */
6637FNIEMOP_DEF(iemOp_jns_Jb)
6638{
6639 IEMOP_MNEMONIC("jns Jb");
6640 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6641 IEMOP_HLP_NO_LOCK_PREFIX();
6642 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6643
6644 IEM_MC_BEGIN(0, 0);
6645 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
6646 IEM_MC_ADVANCE_RIP();
6647 } IEM_MC_ELSE() {
6648 IEM_MC_REL_JMP_S8(i8Imm);
6649 } IEM_MC_ENDIF();
6650 IEM_MC_END();
6651 return VINF_SUCCESS;
6652}
6653
6654
6655/** Opcode 0x7a. */
6656FNIEMOP_DEF(iemOp_jp_Jb)
6657{
6658 IEMOP_MNEMONIC("jp Jb");
6659 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6660 IEMOP_HLP_NO_LOCK_PREFIX();
6661 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6662
6663 IEM_MC_BEGIN(0, 0);
6664 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
6665 IEM_MC_REL_JMP_S8(i8Imm);
6666 } IEM_MC_ELSE() {
6667 IEM_MC_ADVANCE_RIP();
6668 } IEM_MC_ENDIF();
6669 IEM_MC_END();
6670 return VINF_SUCCESS;
6671}
6672
6673
6674/** Opcode 0x7b. */
6675FNIEMOP_DEF(iemOp_jnp_Jb)
6676{
6677 IEMOP_MNEMONIC("jnp Jb");
6678 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6679 IEMOP_HLP_NO_LOCK_PREFIX();
6680 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6681
6682 IEM_MC_BEGIN(0, 0);
6683 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
6684 IEM_MC_ADVANCE_RIP();
6685 } IEM_MC_ELSE() {
6686 IEM_MC_REL_JMP_S8(i8Imm);
6687 } IEM_MC_ENDIF();
6688 IEM_MC_END();
6689 return VINF_SUCCESS;
6690}
6691
6692
6693/** Opcode 0x7c. */
6694FNIEMOP_DEF(iemOp_jl_Jb)
6695{
6696 IEMOP_MNEMONIC("jl/jnge Jb");
6697 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6698 IEMOP_HLP_NO_LOCK_PREFIX();
6699 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6700
6701 IEM_MC_BEGIN(0, 0);
6702 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
6703 IEM_MC_REL_JMP_S8(i8Imm);
6704 } IEM_MC_ELSE() {
6705 IEM_MC_ADVANCE_RIP();
6706 } IEM_MC_ENDIF();
6707 IEM_MC_END();
6708 return VINF_SUCCESS;
6709}
6710
6711
6712/** Opcode 0x7d. */
6713FNIEMOP_DEF(iemOp_jnl_Jb)
6714{
6715 IEMOP_MNEMONIC("jnl/jge Jb");
6716 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6717 IEMOP_HLP_NO_LOCK_PREFIX();
6718 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6719
6720 IEM_MC_BEGIN(0, 0);
6721 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
6722 IEM_MC_ADVANCE_RIP();
6723 } IEM_MC_ELSE() {
6724 IEM_MC_REL_JMP_S8(i8Imm);
6725 } IEM_MC_ENDIF();
6726 IEM_MC_END();
6727 return VINF_SUCCESS;
6728}
6729
6730
6731/** Opcode 0x7e. */
6732FNIEMOP_DEF(iemOp_jle_Jb)
6733{
6734 IEMOP_MNEMONIC("jle/jng Jb");
6735 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6736 IEMOP_HLP_NO_LOCK_PREFIX();
6737 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6738
6739 IEM_MC_BEGIN(0, 0);
6740 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
6741 IEM_MC_REL_JMP_S8(i8Imm);
6742 } IEM_MC_ELSE() {
6743 IEM_MC_ADVANCE_RIP();
6744 } IEM_MC_ENDIF();
6745 IEM_MC_END();
6746 return VINF_SUCCESS;
6747}
6748
6749
6750/** Opcode 0x7f. */
6751FNIEMOP_DEF(iemOp_jnle_Jb)
6752{
6753 IEMOP_MNEMONIC("jnle/jg Jb");
6754 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6755 IEMOP_HLP_NO_LOCK_PREFIX();
6756 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6757
6758 IEM_MC_BEGIN(0, 0);
6759 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
6760 IEM_MC_ADVANCE_RIP();
6761 } IEM_MC_ELSE() {
6762 IEM_MC_REL_JMP_S8(i8Imm);
6763 } IEM_MC_ENDIF();
6764 IEM_MC_END();
6765 return VINF_SUCCESS;
6766}
6767
6768
6769/** Opcode 0x80. */
6770FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_80)
6771{
6772 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6773 IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Eb,Ib");
6774 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
6775
6776 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6777 {
6778 /* register target */
6779 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6780 IEMOP_HLP_NO_LOCK_PREFIX();
6781 IEM_MC_BEGIN(3, 0);
6782 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
6783 IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);
6784 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6785
6786 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6787 IEM_MC_REF_EFLAGS(pEFlags);
6788 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
6789
6790 IEM_MC_ADVANCE_RIP();
6791 IEM_MC_END();
6792 }
6793 else
6794 {
6795 /* memory target */
6796 uint32_t fAccess;
6797 if (pImpl->pfnLockedU8)
6798 fAccess = IEM_ACCESS_DATA_RW;
6799 else
6800 { /* CMP */
6801 IEMOP_HLP_NO_LOCK_PREFIX();
6802 fAccess = IEM_ACCESS_DATA_R;
6803 }
6804 IEM_MC_BEGIN(3, 2);
6805 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
6806 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6807 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6808
6809 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6810 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6811 IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);
6812
6813 IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6814 IEM_MC_FETCH_EFLAGS(EFlags);
6815 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6816 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
6817 else
6818 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);
6819
6820 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
6821 IEM_MC_COMMIT_EFLAGS(EFlags);
6822 IEM_MC_ADVANCE_RIP();
6823 IEM_MC_END();
6824 }
6825 return VINF_SUCCESS;
6826}
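/*
 * Worked example (illustrative, not from the original source): the
 * IEMOP_MNEMONIC2 expression above indexes a packed string table in which
 * every group-1 mnemonic occupies exactly four bytes including its
 * terminator, so adding reg*4 to the literal lands on the right entry:
 *
 *     // Equivalent standalone form, for illustration only.
 *     static const char s_szGrp1[] = "add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp";
 *     const char *pszMnemonic = &s_szGrp1[((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) * 4];
 *     // reg=0 -> "add", reg=1 -> "or", ..., reg=7 -> "cmp"
 */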
6827
6828
6829/** Opcode 0x81. */
6830FNIEMOP_DEF(iemOp_Grp1_Ev_Iz)
6831{
6832 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6833 IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Iz");
6834 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
6835
6836 switch (pIemCpu->enmEffOpSize)
6837 {
6838 case IEMMODE_16BIT:
6839 {
6840 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6841 {
6842 /* register target */
6843 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
6844 IEMOP_HLP_NO_LOCK_PREFIX();
6845 IEM_MC_BEGIN(3, 0);
6846 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6847 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u16Imm, 1);
6848 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6849
6850 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6851 IEM_MC_REF_EFLAGS(pEFlags);
6852 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6853
6854 IEM_MC_ADVANCE_RIP();
6855 IEM_MC_END();
6856 }
6857 else
6858 {
6859 /* memory target */
6860 uint32_t fAccess;
6861 if (pImpl->pfnLockedU16)
6862 fAccess = IEM_ACCESS_DATA_RW;
6863 else
6864                { /* CMP */
6865 IEMOP_HLP_NO_LOCK_PREFIX();
6866 fAccess = IEM_ACCESS_DATA_R;
6867 }
6868 IEM_MC_BEGIN(3, 2);
6869 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6870 IEM_MC_ARG(uint16_t, u16Src, 1);
6871 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6872 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6873
6874 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6875 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
6876 IEM_MC_ASSIGN(u16Src, u16Imm);
6877 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6878 IEM_MC_FETCH_EFLAGS(EFlags);
6879 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6880 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6881 else
6882 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
6883
6884 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
6885 IEM_MC_COMMIT_EFLAGS(EFlags);
6886 IEM_MC_ADVANCE_RIP();
6887 IEM_MC_END();
6888 }
6889 break;
6890 }
6891
6892 case IEMMODE_32BIT:
6893 {
6894 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6895 {
6896 /* register target */
6897 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
6898 IEMOP_HLP_NO_LOCK_PREFIX();
6899 IEM_MC_BEGIN(3, 0);
6900 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6901 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u32Imm, 1);
6902 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6903
6904 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6905 IEM_MC_REF_EFLAGS(pEFlags);
6906 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6907
6908 IEM_MC_ADVANCE_RIP();
6909 IEM_MC_END();
6910 }
6911 else
6912 {
6913 /* memory target */
6914 uint32_t fAccess;
6915 if (pImpl->pfnLockedU32)
6916 fAccess = IEM_ACCESS_DATA_RW;
6917 else
6918                { /* CMP */
6919 IEMOP_HLP_NO_LOCK_PREFIX();
6920 fAccess = IEM_ACCESS_DATA_R;
6921 }
6922 IEM_MC_BEGIN(3, 2);
6923 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6924 IEM_MC_ARG(uint32_t, u32Src, 1);
6925 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6926 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6927
6928 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6929 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
6930 IEM_MC_ASSIGN(u32Src, u32Imm);
6931 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6932 IEM_MC_FETCH_EFLAGS(EFlags);
6933 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6934 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6935 else
6936 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
6937
6938 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
6939 IEM_MC_COMMIT_EFLAGS(EFlags);
6940 IEM_MC_ADVANCE_RIP();
6941 IEM_MC_END();
6942 }
6943 break;
6944 }
6945
6946 case IEMMODE_64BIT:
6947 {
6948 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6949 {
6950 /* register target */
6951 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
6952 IEMOP_HLP_NO_LOCK_PREFIX();
6953 IEM_MC_BEGIN(3, 0);
6954 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6955 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u64Imm, 1);
6956 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6957
6958 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6959 IEM_MC_REF_EFLAGS(pEFlags);
6960 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6961
6962 IEM_MC_ADVANCE_RIP();
6963 IEM_MC_END();
6964 }
6965 else
6966 {
6967 /* memory target */
6968 uint32_t fAccess;
6969 if (pImpl->pfnLockedU64)
6970 fAccess = IEM_ACCESS_DATA_RW;
6971 else
6972 { /* CMP */
6973 IEMOP_HLP_NO_LOCK_PREFIX();
6974 fAccess = IEM_ACCESS_DATA_R;
6975 }
6976 IEM_MC_BEGIN(3, 2);
6977 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6978 IEM_MC_ARG(uint64_t, u64Src, 1);
6979 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6980 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6981
6982 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6983 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
6984 IEM_MC_ASSIGN(u64Src, u64Imm);
6985 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6986 IEM_MC_FETCH_EFLAGS(EFlags);
6987 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6988 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6989 else
6990 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
6991
6992 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
6993 IEM_MC_COMMIT_EFLAGS(EFlags);
6994 IEM_MC_ADVANCE_RIP();
6995 IEM_MC_END();
6996 }
6997 break;
6998 }
6999 }
7000 return VINF_SUCCESS;
7001}
7002
7003
7004/** Opcode 0x82. */
7005FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_82)
7006{
7007 IEMOP_HLP_NO_64BIT(); /** @todo do we need to decode the whole instruction or is this ok? */
7008 return FNIEMOP_CALL(iemOp_Grp1_Eb_Ib_80);
7009}
7010
7011
7012/** Opcode 0x83. */
7013FNIEMOP_DEF(iemOp_Grp1_Ev_Ib)
7014{
7015 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7016 IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Ib");
7017 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
7018
7019 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7020 {
7021 /*
7022 * Register target
7023 */
7024 IEMOP_HLP_NO_LOCK_PREFIX();
7025 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
7026 switch (pIemCpu->enmEffOpSize)
7027 {
7028 case IEMMODE_16BIT:
7029 {
7030 IEM_MC_BEGIN(3, 0);
7031 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
7032 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ (int8_t)u8Imm,1);
7033 IEM_MC_ARG(uint32_t *, pEFlags, 2);
7034
7035 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7036 IEM_MC_REF_EFLAGS(pEFlags);
7037 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
7038
7039 IEM_MC_ADVANCE_RIP();
7040 IEM_MC_END();
7041 break;
7042 }
7043
7044 case IEMMODE_32BIT:
7045 {
7046 IEM_MC_BEGIN(3, 0);
7047 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
7048 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ (int8_t)u8Imm,1);
7049 IEM_MC_ARG(uint32_t *, pEFlags, 2);
7050
7051 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7052 IEM_MC_REF_EFLAGS(pEFlags);
7053 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
7054
7055 IEM_MC_ADVANCE_RIP();
7056 IEM_MC_END();
7057 break;
7058 }
7059
7060 case IEMMODE_64BIT:
7061 {
7062 IEM_MC_BEGIN(3, 0);
7063 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
7064 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ (int8_t)u8Imm,1);
7065 IEM_MC_ARG(uint32_t *, pEFlags, 2);
7066
7067 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7068 IEM_MC_REF_EFLAGS(pEFlags);
7069 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
7070
7071 IEM_MC_ADVANCE_RIP();
7072 IEM_MC_END();
7073 break;
7074 }
7075 }
7076 }
7077 else
7078 {
7079 /*
7080 * Memory target.
7081 */
7082 uint32_t fAccess;
7083 if (pImpl->pfnLockedU16)
7084 fAccess = IEM_ACCESS_DATA_RW;
7085 else
7086 { /* CMP */
7087 IEMOP_HLP_NO_LOCK_PREFIX();
7088 fAccess = IEM_ACCESS_DATA_R;
7089 }
7090
7091 switch (pIemCpu->enmEffOpSize)
7092 {
7093 case IEMMODE_16BIT:
7094 {
7095 IEM_MC_BEGIN(3, 2);
7096 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
7097 IEM_MC_ARG(uint16_t, u16Src, 1);
7098 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
7099 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7100
7101 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7102 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
7103 IEM_MC_ASSIGN(u16Src, (int8_t)u8Imm);
7104 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
7105 IEM_MC_FETCH_EFLAGS(EFlags);
7106 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
7107 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
7108 else
7109 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
7110
7111 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
7112 IEM_MC_COMMIT_EFLAGS(EFlags);
7113 IEM_MC_ADVANCE_RIP();
7114 IEM_MC_END();
7115 break;
7116 }
7117
7118 case IEMMODE_32BIT:
7119 {
7120 IEM_MC_BEGIN(3, 2);
7121 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
7122 IEM_MC_ARG(uint32_t, u32Src, 1);
7123 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
7124 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7125
7126 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7127 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
7128 IEM_MC_ASSIGN(u32Src, (int8_t)u8Imm);
7129 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
7130 IEM_MC_FETCH_EFLAGS(EFlags);
7131 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
7132 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
7133 else
7134 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
7135
7136 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
7137 IEM_MC_COMMIT_EFLAGS(EFlags);
7138 IEM_MC_ADVANCE_RIP();
7139 IEM_MC_END();
7140 break;
7141 }
7142
7143 case IEMMODE_64BIT:
7144 {
7145 IEM_MC_BEGIN(3, 2);
7146 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
7147 IEM_MC_ARG(uint64_t, u64Src, 1);
7148 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
7149 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7150
7151 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7152 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
7153 IEM_MC_ASSIGN(u64Src, (int8_t)u8Imm);
7154 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
7155 IEM_MC_FETCH_EFLAGS(EFlags);
7156 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
7157 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
7158 else
7159 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
7160
7161 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
7162 IEM_MC_COMMIT_EFLAGS(EFlags);
7163 IEM_MC_ADVANCE_RIP();
7164 IEM_MC_END();
7165 break;
7166 }
7167 }
7168 }
7169 return VINF_SUCCESS;
7170}
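/*
 * Worked example (illustrative, not from the original source): opcode 0x83
 * takes a sign-extended 8-bit immediate against a word/dword/qword
 * destination, hence the (int8_t)u8Imm casts in every branch above:
 *
 *     83 c0 ff       add eax, -1    ; u32Src = (uint32_t)(int8_t)0xff = 0xffffffff
 *     81 c0 ff ff ff ff             ; the long form of the same operation
 *
 * This makes 0x83 the compact encoding for small constants that 0x81 would
 * spell out with a full-size immediate.
 */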
7171
7172
7173/** Opcode 0x84. */
7174FNIEMOP_DEF(iemOp_test_Eb_Gb)
7175{
7176 IEMOP_MNEMONIC("test Eb,Gb");
7177 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
7178 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
7179 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_test);
7180}
7181
7182
7183/** Opcode 0x85. */
7184FNIEMOP_DEF(iemOp_test_Ev_Gv)
7185{
7186 IEMOP_MNEMONIC("test Ev,Gv");
7187 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
7188 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
7189 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_test);
7190}
7191
7192
7193/** Opcode 0x86. */
7194FNIEMOP_DEF(iemOp_xchg_Eb_Gb)
7195{
7196 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7197 IEMOP_MNEMONIC("xchg Eb,Gb");
7198
7199 /*
7200 * If rm is denoting a register, no more instruction bytes.
7201 */
7202 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7203 {
7204 IEMOP_HLP_NO_LOCK_PREFIX();
7205
7206 IEM_MC_BEGIN(0, 2);
7207 IEM_MC_LOCAL(uint8_t, uTmp1);
7208 IEM_MC_LOCAL(uint8_t, uTmp2);
7209
7210 IEM_MC_FETCH_GREG_U8(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7211 IEM_MC_FETCH_GREG_U8(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7212 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
7213 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
7214
7215 IEM_MC_ADVANCE_RIP();
7216 IEM_MC_END();
7217 }
7218 else
7219 {
7220 /*
7221 * We're accessing memory.
7222 */
7223/** @todo the register must be committed separately! */
7224 IEM_MC_BEGIN(2, 2);
7225 IEM_MC_ARG(uint8_t *, pu8Mem, 0);
7226 IEM_MC_ARG(uint8_t *, pu8Reg, 1);
7227 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7228
7229 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7230 IEM_MC_MEM_MAP(pu8Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
7231 IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7232 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u8, pu8Mem, pu8Reg);
7233 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Mem, IEM_ACCESS_DATA_RW);
7234
7235 IEM_MC_ADVANCE_RIP();
7236 IEM_MC_END();
7237 }
7238 return VINF_SUCCESS;
7239}
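/*
 * Illustrative note (not from the original source): XCHG with a memory
 * operand is architecturally locked whether or not a LOCK prefix is present,
 * which is why the memory path above maps the operand IEM_ACCESS_DATA_RW and
 * uses a single iemAImpl_xchg_u8 worker instead of the normal/locked split
 * seen in the group-1 handlers:
 *
 *     86 03       xchg [ebx], al       ; implicitly locked
 *     f0 86 03    lock xchg [ebx], al  ; same bus behaviour
 */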
7240
7241
7242/** Opcode 0x87. */
7243FNIEMOP_DEF(iemOp_xchg_Ev_Gv)
7244{
7245 IEMOP_MNEMONIC("xchg Ev,Gv");
7246 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7247
7248 /*
7249 * If rm is denoting a register, no more instruction bytes.
7250 */
7251 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7252 {
7253 IEMOP_HLP_NO_LOCK_PREFIX();
7254
7255 switch (pIemCpu->enmEffOpSize)
7256 {
7257 case IEMMODE_16BIT:
7258 IEM_MC_BEGIN(0, 2);
7259 IEM_MC_LOCAL(uint16_t, uTmp1);
7260 IEM_MC_LOCAL(uint16_t, uTmp2);
7261
7262 IEM_MC_FETCH_GREG_U16(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7263 IEM_MC_FETCH_GREG_U16(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7264 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
7265 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
7266
7267 IEM_MC_ADVANCE_RIP();
7268 IEM_MC_END();
7269 return VINF_SUCCESS;
7270
7271 case IEMMODE_32BIT:
7272 IEM_MC_BEGIN(0, 2);
7273 IEM_MC_LOCAL(uint32_t, uTmp1);
7274 IEM_MC_LOCAL(uint32_t, uTmp2);
7275
7276 IEM_MC_FETCH_GREG_U32(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7277 IEM_MC_FETCH_GREG_U32(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7278 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
7279 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
7280
7281 IEM_MC_ADVANCE_RIP();
7282 IEM_MC_END();
7283 return VINF_SUCCESS;
7284
7285 case IEMMODE_64BIT:
7286 IEM_MC_BEGIN(0, 2);
7287 IEM_MC_LOCAL(uint64_t, uTmp1);
7288 IEM_MC_LOCAL(uint64_t, uTmp2);
7289
7290 IEM_MC_FETCH_GREG_U64(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7291 IEM_MC_FETCH_GREG_U64(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7292 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
7293 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
7294
7295 IEM_MC_ADVANCE_RIP();
7296 IEM_MC_END();
7297 return VINF_SUCCESS;
7298
7299 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7300 }
7301 }
7302 else
7303 {
7304 /*
7305 * We're accessing memory.
7306 */
7307 switch (pIemCpu->enmEffOpSize)
7308 {
7309/** @todo the register must be committed separately! */
7310 case IEMMODE_16BIT:
7311 IEM_MC_BEGIN(2, 2);
7312 IEM_MC_ARG(uint16_t *, pu16Mem, 0);
7313 IEM_MC_ARG(uint16_t *, pu16Reg, 1);
7314 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7315
7316 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7317 IEM_MC_MEM_MAP(pu16Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
7318 IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7319 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u16, pu16Mem, pu16Reg);
7320 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Mem, IEM_ACCESS_DATA_RW);
7321
7322 IEM_MC_ADVANCE_RIP();
7323 IEM_MC_END();
7324 return VINF_SUCCESS;
7325
7326 case IEMMODE_32BIT:
7327 IEM_MC_BEGIN(2, 2);
7328 IEM_MC_ARG(uint32_t *, pu32Mem, 0);
7329 IEM_MC_ARG(uint32_t *, pu32Reg, 1);
7330 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7331
7332 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7333 IEM_MC_MEM_MAP(pu32Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
7334 IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7335 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u32, pu32Mem, pu32Reg);
7336 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Mem, IEM_ACCESS_DATA_RW);
7337
7338 IEM_MC_ADVANCE_RIP();
7339 IEM_MC_END();
7340 return VINF_SUCCESS;
7341
7342 case IEMMODE_64BIT:
7343 IEM_MC_BEGIN(2, 2);
7344 IEM_MC_ARG(uint64_t *, pu64Mem, 0);
7345 IEM_MC_ARG(uint64_t *, pu64Reg, 1);
7346 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7347
7348 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7349 IEM_MC_MEM_MAP(pu64Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
7350 IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7351 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u64, pu64Mem, pu64Reg);
7352 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Mem, IEM_ACCESS_DATA_RW);
7353
7354 IEM_MC_ADVANCE_RIP();
7355 IEM_MC_END();
7356 return VINF_SUCCESS;
7357
7358 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7359 }
7360 }
7361}
7362
7363
7364/** Opcode 0x88. */
7365FNIEMOP_DEF(iemOp_mov_Eb_Gb)
7366{
7367 IEMOP_MNEMONIC("mov Eb,Gb");
7368
7369 uint8_t bRm;
7370 IEM_OPCODE_GET_NEXT_U8(&bRm);
7371 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7372
7373 /*
7374 * If rm is denoting a register, no more instruction bytes.
7375 */
7376 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7377 {
7378 IEM_MC_BEGIN(0, 1);
7379 IEM_MC_LOCAL(uint8_t, u8Value);
7380 IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7381 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Value);
7382 IEM_MC_ADVANCE_RIP();
7383 IEM_MC_END();
7384 }
7385 else
7386 {
7387 /*
7388 * We're writing a register to memory.
7389 */
7390 IEM_MC_BEGIN(0, 2);
7391 IEM_MC_LOCAL(uint8_t, u8Value);
7392 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7393 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7394 IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7395 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Value);
7396 IEM_MC_ADVANCE_RIP();
7397 IEM_MC_END();
7398 }
7399 return VINF_SUCCESS;
7401}
7402
7403
7404/** Opcode 0x89. */
7405FNIEMOP_DEF(iemOp_mov_Ev_Gv)
7406{
7407 IEMOP_MNEMONIC("mov Ev,Gv");
7408
7409 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7410 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7411
7412 /*
7413 * If rm is denoting a register, no more instruction bytes.
7414 */
7415 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7416 {
7417 switch (pIemCpu->enmEffOpSize)
7418 {
7419 case IEMMODE_16BIT:
7420 IEM_MC_BEGIN(0, 1);
7421 IEM_MC_LOCAL(uint16_t, u16Value);
7422 IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7423 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
7424 IEM_MC_ADVANCE_RIP();
7425 IEM_MC_END();
7426 break;
7427
7428 case IEMMODE_32BIT:
7429 IEM_MC_BEGIN(0, 1);
7430 IEM_MC_LOCAL(uint32_t, u32Value);
7431 IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7432 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
7433 IEM_MC_ADVANCE_RIP();
7434 IEM_MC_END();
7435 break;
7436
7437 case IEMMODE_64BIT:
7438 IEM_MC_BEGIN(0, 1);
7439 IEM_MC_LOCAL(uint64_t, u64Value);
7440 IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7441 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
7442 IEM_MC_ADVANCE_RIP();
7443 IEM_MC_END();
7444 break;
7445 }
7446 }
7447 else
7448 {
7449 /*
7450 * We're writing a register to memory.
7451 */
7452 switch (pIemCpu->enmEffOpSize)
7453 {
7454 case IEMMODE_16BIT:
7455 IEM_MC_BEGIN(0, 2);
7456 IEM_MC_LOCAL(uint16_t, u16Value);
7457 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7458 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7459 IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7460 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
7461 IEM_MC_ADVANCE_RIP();
7462 IEM_MC_END();
7463 break;
7464
7465 case IEMMODE_32BIT:
7466 IEM_MC_BEGIN(0, 2);
7467 IEM_MC_LOCAL(uint32_t, u32Value);
7468 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7469 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7470 IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7471 IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Value);
7472 IEM_MC_ADVANCE_RIP();
7473 IEM_MC_END();
7474 break;
7475
7476 case IEMMODE_64BIT:
7477 IEM_MC_BEGIN(0, 2);
7478 IEM_MC_LOCAL(uint64_t, u64Value);
7479 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7480 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7481 IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7482 IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Value);
7483 IEM_MC_ADVANCE_RIP();
7484 IEM_MC_END();
7485 break;
7486 }
7487 }
7488 return VINF_SUCCESS;
7489}
7490
7491
7492/** Opcode 0x8a. */
7493FNIEMOP_DEF(iemOp_mov_Gb_Eb)
7494{
7495 IEMOP_MNEMONIC("mov Gb,Eb");
7496
7497 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7498 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7499
7500 /*
7501 * If rm is denoting a register, no more instruction bytes.
7502 */
7503 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7504 {
7505 IEM_MC_BEGIN(0, 1);
7506 IEM_MC_LOCAL(uint8_t, u8Value);
7507 IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7508 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
7509 IEM_MC_ADVANCE_RIP();
7510 IEM_MC_END();
7511 }
7512 else
7513 {
7514 /*
7515 * We're loading a register from memory.
7516 */
7517 IEM_MC_BEGIN(0, 2);
7518 IEM_MC_LOCAL(uint8_t, u8Value);
7519 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7520 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7521 IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
7522 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
7523 IEM_MC_ADVANCE_RIP();
7524 IEM_MC_END();
7525 }
7526 return VINF_SUCCESS;
7527}
7528
7529
7530/** Opcode 0x8b. */
7531FNIEMOP_DEF(iemOp_mov_Gv_Ev)
7532{
7533 IEMOP_MNEMONIC("mov Gv,Ev");
7534
7535 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7536 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7537
7538 /*
7539 * If rm is denoting a register, no more instruction bytes.
7540 */
7541 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7542 {
7543 switch (pIemCpu->enmEffOpSize)
7544 {
7545 case IEMMODE_16BIT:
7546 IEM_MC_BEGIN(0, 1);
7547 IEM_MC_LOCAL(uint16_t, u16Value);
7548 IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7549 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
7550 IEM_MC_ADVANCE_RIP();
7551 IEM_MC_END();
7552 break;
7553
7554 case IEMMODE_32BIT:
7555 IEM_MC_BEGIN(0, 1);
7556 IEM_MC_LOCAL(uint32_t, u32Value);
7557 IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7558 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
7559 IEM_MC_ADVANCE_RIP();
7560 IEM_MC_END();
7561 break;
7562
7563 case IEMMODE_64BIT:
7564 IEM_MC_BEGIN(0, 1);
7565 IEM_MC_LOCAL(uint64_t, u64Value);
7566 IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7567 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
7568 IEM_MC_ADVANCE_RIP();
7569 IEM_MC_END();
7570 break;
7571 }
7572 }
7573 else
7574 {
7575 /*
7576 * We're loading a register from memory.
7577 */
7578 switch (pIemCpu->enmEffOpSize)
7579 {
7580 case IEMMODE_16BIT:
7581 IEM_MC_BEGIN(0, 2);
7582 IEM_MC_LOCAL(uint16_t, u16Value);
7583 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7584 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7585 IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
7586 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
7587 IEM_MC_ADVANCE_RIP();
7588 IEM_MC_END();
7589 break;
7590
7591 case IEMMODE_32BIT:
7592 IEM_MC_BEGIN(0, 2);
7593 IEM_MC_LOCAL(uint32_t, u32Value);
7594 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7595 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7596 IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
7597 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
7598 IEM_MC_ADVANCE_RIP();
7599 IEM_MC_END();
7600 break;
7601
7602 case IEMMODE_64BIT:
7603 IEM_MC_BEGIN(0, 2);
7604 IEM_MC_LOCAL(uint64_t, u64Value);
7605 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7606 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7607 IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
7608 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
7609 IEM_MC_ADVANCE_RIP();
7610 IEM_MC_END();
7611 break;
7612 }
7613 }
7614 return VINF_SUCCESS;
7615}
7616
7617
7618/** Opcode 0x8c. */
7619FNIEMOP_DEF(iemOp_mov_Ev_Sw)
7620{
7621 IEMOP_MNEMONIC("mov Ev,Sw");
7622
7623 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7624 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7625
7626 /*
7627 * Check that the specified segment register exists. The REX.R prefix is ignored.
7628 */
7629 uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
7630 if ( iSegReg > X86_SREG_GS)
7631 return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7632
7633 /*
7634 * If rm is denoting a register, no more instruction bytes.
7635 * In that case, the operand size is respected and the upper bits are
7636 * cleared (starting with some Pentium).
7637 */
7638 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7639 {
7640 switch (pIemCpu->enmEffOpSize)
7641 {
7642 case IEMMODE_16BIT:
7643 IEM_MC_BEGIN(0, 1);
7644 IEM_MC_LOCAL(uint16_t, u16Value);
7645 IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
7646 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
7647 IEM_MC_ADVANCE_RIP();
7648 IEM_MC_END();
7649 break;
7650
7651 case IEMMODE_32BIT:
7652 IEM_MC_BEGIN(0, 1);
7653 IEM_MC_LOCAL(uint32_t, u32Value);
7654 IEM_MC_FETCH_SREG_ZX_U32(u32Value, iSegReg);
7655 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
7656 IEM_MC_ADVANCE_RIP();
7657 IEM_MC_END();
7658 break;
7659
7660 case IEMMODE_64BIT:
7661 IEM_MC_BEGIN(0, 1);
7662 IEM_MC_LOCAL(uint64_t, u64Value);
7663 IEM_MC_FETCH_SREG_ZX_U64(u64Value, iSegReg);
7664 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
7665 IEM_MC_ADVANCE_RIP();
7666 IEM_MC_END();
7667 break;
7668 }
7669 }
7670 else
7671 {
7672 /*
7673 * We're saving the register to memory. The access is word sized
7674 * regardless of operand size prefixes.
7675 */
7676#if 0 /* not necessary */
7677 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
7678#endif
7679 IEM_MC_BEGIN(0, 2);
7680 IEM_MC_LOCAL(uint16_t, u16Value);
7681 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7682 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7683 IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
7684 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
7685 IEM_MC_ADVANCE_RIP();
7686 IEM_MC_END();
7687 }
7688 return VINF_SUCCESS;
7689}
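/* Worked example for the register form above: with DS=0x0023, a 32-bit
   "mov eax, ds" yields EAX=0x00000023 - the selector is zero-extended, so
   bits 31:16 are cleared as noted in the comment at the top of the function. */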
7690
7691
7692
7693
7694/** Opcode 0x8d. */
7695FNIEMOP_DEF(iemOp_lea_Gv_M)
7696{
7697 IEMOP_MNEMONIC("lea Gv,M");
7698 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7699 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7700 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7701 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); /* no register form */
7702
7703 switch (pIemCpu->enmEffOpSize)
7704 {
7705 case IEMMODE_16BIT:
7706 IEM_MC_BEGIN(0, 2);
7707 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
7708 IEM_MC_LOCAL(uint16_t, u16Cast);
7709 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
7710 IEM_MC_ASSIGN_TO_SMALLER(u16Cast, GCPtrEffSrc);
7711 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Cast);
7712 IEM_MC_ADVANCE_RIP();
7713 IEM_MC_END();
7714 return VINF_SUCCESS;
7715
7716 case IEMMODE_32BIT:
7717 IEM_MC_BEGIN(0, 2);
7718 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
7719 IEM_MC_LOCAL(uint32_t, u32Cast);
7720 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
7721 IEM_MC_ASSIGN_TO_SMALLER(u32Cast, GCPtrEffSrc);
7722 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Cast);
7723 IEM_MC_ADVANCE_RIP();
7724 IEM_MC_END();
7725 return VINF_SUCCESS;
7726
7727 case IEMMODE_64BIT:
7728 IEM_MC_BEGIN(0, 1);
7729 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
7730 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
7731 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, GCPtrEffSrc);
7732 IEM_MC_ADVANCE_RIP();
7733 IEM_MC_END();
7734 return VINF_SUCCESS;
7735 }
7736 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
7737}
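/* Worked example of the IEM_MC_ASSIGN_TO_SMALLER truncation above: with a
   16-bit operand size and EDX=0x00011000, "lea ax, [edx+0x0f00]" computes the
   effective address 0x00011f00 and stores only its low word, giving AX=0x1f00. */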
7738
7739
7740/** Opcode 0x8e. */
7741FNIEMOP_DEF(iemOp_mov_Sw_Ev)
7742{
7743 IEMOP_MNEMONIC("mov Sw,Ev");
7744
7745 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7746 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7747
7748 /*
7749 * The practical operand size is 16-bit.
7750 */
7751#if 0 /* not necessary */
7752 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
7753#endif
7754
7755 /*
7756 * Check that the destination register exists and can be used with this
7757 * instruction. The REX.R prefix is ignored.
7758 */
7759 uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
7760 if ( iSegReg == X86_SREG_CS
7761 || iSegReg > X86_SREG_GS)
7762 return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7763
7764 /*
7765 * If rm is denoting a register, no more instruction bytes.
7766 */
7767 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7768 {
7769 IEM_MC_BEGIN(2, 0);
7770 IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
7771 IEM_MC_ARG(uint16_t, u16Value, 1);
7772 IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7773 IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
7774 IEM_MC_END();
7775 }
7776 else
7777 {
7778 /*
7779 * We're loading the register from memory. The access is word sized
7780 * regardless of operand size prefixes.
7781 */
7782 IEM_MC_BEGIN(2, 1);
7783 IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
7784 IEM_MC_ARG(uint16_t, u16Value, 1);
7785 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7786 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7787 IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
7788 IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
7789 IEM_MC_END();
7790 }
7791 return VINF_SUCCESS;
7792}
7793
7794
7795/** Opcode 0x8f. */
7796FNIEMOP_DEF(iemOp_pop_Ev)
7797{
7798 /* This bugger is rather annoying as it requires rSP to be updated before
7799 doing the effective address calculations. Will eventually require a
7800 split between the R/M+SIB decoding and the effective address
7801 calculation - which is something that is required for any attempt at
7802 reusing this code for a recompiler. It may also be good to have if we
7803 need to delay #UD exception caused by invalid lock prefixes.
7804
7805 For now, we'll do a mostly safe interpreter-only implementation here. */
7806 /** @todo What's the deal with the 'reg' field and pop Ev? Ignoring it for
7807 * now until tests show it's checked. */
7808 IEMOP_MNEMONIC("pop Ev");
7809 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7810 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7811
7812 /* Register access is relatively easy and can share code. */
7813 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7814 return FNIEMOP_CALL_1(iemOpCommonPopGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7815
7816 /*
7817 * Memory target.
7818 *
7819 * Intel says that RSP is incremented before it's used in any effective
7820 * address calculations. This means some serious extra annoyance here since
7821 * we decode and calculate the effective address in one step and like to
7822 * delay committing registers till everything is done.
7823 *
7824 * So, we'll decode and calculate the effective address twice. This will
7825 * require some recoding if turned into a recompiler.
7826 */
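/* Concrete illustration of the Intel rule above: in 64-bit mode with
   RSP=0x1000, "pop qword [rsp]" reads the value at 0x1000, increments RSP to
   0x1008, and - because the effective address uses the incremented RSP -
   stores that value at 0x1008. Hence the double decode pass below. */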
7827 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* The common code does this differently. */
7828
7829#ifndef TST_IEM_CHECK_MC
7830 /* Calc effective address with modified ESP. */
7831 uint8_t const offOpcodeSaved = pIemCpu->offOpcode;
7832 RTGCPTR GCPtrEff;
7833 VBOXSTRICTRC rcStrict;
7834 rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, &GCPtrEff);
7835 if (rcStrict != VINF_SUCCESS)
7836 return rcStrict;
7837 pIemCpu->offOpcode = offOpcodeSaved;
7838
7839 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7840 uint64_t const RspSaved = pCtx->rsp;
7841 switch (pIemCpu->enmEffOpSize)
7842 {
7843 case IEMMODE_16BIT: iemRegAddToRsp(pCtx, 2); break;
7844 case IEMMODE_32BIT: iemRegAddToRsp(pCtx, 4); break;
7845 case IEMMODE_64BIT: iemRegAddToRsp(pCtx, 8); break;
7846 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7847 }
7848 rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, &GCPtrEff);
7849 Assert(rcStrict == VINF_SUCCESS);
7850 pCtx->rsp = RspSaved;
7851
7852 /* Perform the operation - this should be CImpl. */
7853 RTUINT64U TmpRsp;
7854 TmpRsp.u = pCtx->rsp;
7855 switch (pIemCpu->enmEffOpSize)
7856 {
7857 case IEMMODE_16BIT:
7858 {
7859 uint16_t u16Value;
7860 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
7861 if (rcStrict == VINF_SUCCESS)
7862 rcStrict = iemMemStoreDataU16(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u16Value);
7863 break;
7864 }
7865
7866 case IEMMODE_32BIT:
7867 {
7868 uint32_t u32Value;
7869 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
7870 if (rcStrict == VINF_SUCCESS)
7871 rcStrict = iemMemStoreDataU32(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u32Value);
7872 break;
7873 }
7874
7875 case IEMMODE_64BIT:
7876 {
7877 uint64_t u64Value;
7878 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
7879 if (rcStrict == VINF_SUCCESS)
7880 rcStrict = iemMemStoreDataU64(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u64Value);
7881 break;
7882 }
7883
7884 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7885 }
7886 if (rcStrict == VINF_SUCCESS)
7887 {
7888 pCtx->rsp = TmpRsp.u;
7889 iemRegUpdateRip(pIemCpu);
7890 }
7891 return rcStrict;
7892
7893#else
7894 return VERR_IEM_IPE_2;
7895#endif
7896}
7897
7898
7899/**
7900 * Common 'xchg reg,rAX' helper.
7901 */
7902FNIEMOP_DEF_1(iemOpCommonXchgGRegRax, uint8_t, iReg)
7903{
7904 IEMOP_HLP_NO_LOCK_PREFIX();
7905
7906 iReg |= pIemCpu->uRexB;
7907 switch (pIemCpu->enmEffOpSize)
7908 {
7909 case IEMMODE_16BIT:
7910 IEM_MC_BEGIN(0, 2);
7911 IEM_MC_LOCAL(uint16_t, u16Tmp1);
7912 IEM_MC_LOCAL(uint16_t, u16Tmp2);
7913 IEM_MC_FETCH_GREG_U16(u16Tmp1, iReg);
7914 IEM_MC_FETCH_GREG_U16(u16Tmp2, X86_GREG_xAX);
7915 IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp1);
7916 IEM_MC_STORE_GREG_U16(iReg, u16Tmp2);
7917 IEM_MC_ADVANCE_RIP();
7918 IEM_MC_END();
7919 return VINF_SUCCESS;
7920
7921 case IEMMODE_32BIT:
7922 IEM_MC_BEGIN(0, 2);
7923 IEM_MC_LOCAL(uint32_t, u32Tmp1);
7924 IEM_MC_LOCAL(uint32_t, u32Tmp2);
7925 IEM_MC_FETCH_GREG_U32(u32Tmp1, iReg);
7926 IEM_MC_FETCH_GREG_U32(u32Tmp2, X86_GREG_xAX);
7927 IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp1);
7928 IEM_MC_STORE_GREG_U32(iReg, u32Tmp2);
7929 IEM_MC_ADVANCE_RIP();
7930 IEM_MC_END();
7931 return VINF_SUCCESS;
7932
7933 case IEMMODE_64BIT:
7934 IEM_MC_BEGIN(0, 2);
7935 IEM_MC_LOCAL(uint64_t, u64Tmp1);
7936 IEM_MC_LOCAL(uint64_t, u64Tmp2);
7937 IEM_MC_FETCH_GREG_U64(u64Tmp1, iReg);
7938 IEM_MC_FETCH_GREG_U64(u64Tmp2, X86_GREG_xAX);
7939 IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp1);
7940 IEM_MC_STORE_GREG_U64(iReg, u64Tmp2);
7941 IEM_MC_ADVANCE_RIP();
7942 IEM_MC_END();
7943 return VINF_SUCCESS;
7944
7945 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7946 }
7947}
7948
7949
7950/** Opcode 0x90. */
7951FNIEMOP_DEF(iemOp_nop)
7952{
7953 /* R8/R8D and RAX/EAX can be exchanged. */
7954 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_B)
7955 {
7956 IEMOP_MNEMONIC("xchg r8,rAX");
7957 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xAX);
7958 }
7959
7960 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
7961 IEMOP_MNEMONIC("pause");
7962 else
7963 IEMOP_MNEMONIC("nop");
7964 IEM_MC_BEGIN(0, 0);
7965 IEM_MC_ADVANCE_RIP();
7966 IEM_MC_END();
7967 return VINF_SUCCESS;
7968}
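/* Encoding note: plain 0x90 must stay a true no-op - in 64-bit mode a real
   "xchg eax,eax" would zero-extend into RAX - while 0x41 0x90 (REX.B + 0x90)
   genuinely encodes "xchg r8,rAX", which is why the REX.B case above is
   routed to the common xchg worker. */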
7969
7970
7971/** Opcode 0x91. */
7972FNIEMOP_DEF(iemOp_xchg_eCX_eAX)
7973{
7974 IEMOP_MNEMONIC("xchg rCX,rAX");
7975 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xCX);
7976}
7977
7978
7979/** Opcode 0x92. */
7980FNIEMOP_DEF(iemOp_xchg_eDX_eAX)
7981{
7982 IEMOP_MNEMONIC("xchg rDX,rAX");
7983 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDX);
7984}
7985
7986
7987/** Opcode 0x93. */
7988FNIEMOP_DEF(iemOp_xchg_eBX_eAX)
7989{
7990 IEMOP_MNEMONIC("xchg rBX,rAX");
7991 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBX);
7992}
7993
7994
7995/** Opcode 0x94. */
7996FNIEMOP_DEF(iemOp_xchg_eSP_eAX)
7997{
7998 IEMOP_MNEMONIC("xchg rSX,rAX");
7999 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSP);
8000}
8001
8002
8003/** Opcode 0x95. */
8004FNIEMOP_DEF(iemOp_xchg_eBP_eAX)
8005{
8006 IEMOP_MNEMONIC("xchg rBP,rAX");
8007 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBP);
8008}
8009
8010
8011/** Opcode 0x96. */
8012FNIEMOP_DEF(iemOp_xchg_eSI_eAX)
8013{
8014 IEMOP_MNEMONIC("xchg rSI,rAX");
8015 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSI);
8016}
8017
8018
8019/** Opcode 0x97. */
8020FNIEMOP_DEF(iemOp_xchg_eDI_eAX)
8021{
8022 IEMOP_MNEMONIC("xchg rDI,rAX");
8023 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDI);
8024}
8025
8026
8027/** Opcode 0x98. */
8028FNIEMOP_DEF(iemOp_cbw)
8029{
8030 IEMOP_HLP_NO_LOCK_PREFIX();
8031 switch (pIemCpu->enmEffOpSize)
8032 {
8033 case IEMMODE_16BIT:
8034 IEMOP_MNEMONIC("cbw");
8035 IEM_MC_BEGIN(0, 1);
8036 IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 7) {
8037 IEM_MC_OR_GREG_U16(X86_GREG_xAX, UINT16_C(0xff00));
8038 } IEM_MC_ELSE() {
8039 IEM_MC_AND_GREG_U16(X86_GREG_xAX, UINT16_C(0x00ff));
8040 } IEM_MC_ENDIF();
8041 IEM_MC_ADVANCE_RIP();
8042 IEM_MC_END();
8043 return VINF_SUCCESS;
8044
8045 case IEMMODE_32BIT:
8046 IEMOP_MNEMONIC("cwde");
8047 IEM_MC_BEGIN(0, 1);
8048 IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 15) {
8049 IEM_MC_OR_GREG_U32(X86_GREG_xAX, UINT32_C(0xffff0000));
8050 } IEM_MC_ELSE() {
8051 IEM_MC_AND_GREG_U32(X86_GREG_xAX, UINT32_C(0x0000ffff));
8052 } IEM_MC_ENDIF();
8053 IEM_MC_ADVANCE_RIP();
8054 IEM_MC_END();
8055 return VINF_SUCCESS;
8056
8057 case IEMMODE_64BIT:
8058 IEMOP_MNEMONIC("cdqe");
8059 IEM_MC_BEGIN(0, 1);
8060 IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 31) {
8061 IEM_MC_OR_GREG_U64(X86_GREG_xAX, UINT64_C(0xffffffff00000000));
8062 } IEM_MC_ELSE() {
8063 IEM_MC_AND_GREG_U64(X86_GREG_xAX, UINT64_C(0x00000000ffffffff));
8064 } IEM_MC_ENDIF();
8065 IEM_MC_ADVANCE_RIP();
8066 IEM_MC_END();
8067 return VINF_SUCCESS;
8068
8069 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8070 }
8071}
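/* The OR/AND pair above is just sign extension. A plain-C sketch of the
   16-bit (cbw) case, as a hypothetical helper (illustrative, not built): */
#if 0
static uint16_t iemSketchCbw(uint16_t uAx)
{
    /* E.g. AL=0x80 yields AX=0xff80, AL=0x7f yields AX=0x007f. */
    return (uAx & 0x80) ? (uint16_t)(uAx | 0xff00) : (uint16_t)(uAx & 0x00ff);
}
#endif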
8072
8073
8074/** Opcode 0x99. */
8075FNIEMOP_DEF(iemOp_cwd)
8076{
8077 IEMOP_HLP_NO_LOCK_PREFIX();
8078 switch (pIemCpu->enmEffOpSize)
8079 {
8080 case IEMMODE_16BIT:
8081 IEMOP_MNEMONIC("cwd");
8082 IEM_MC_BEGIN(0, 1);
8083 IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 15) {
8084 IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xDX, UINT16_C(0xffff));
8085 } IEM_MC_ELSE() {
8086 IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xDX, 0);
8087 } IEM_MC_ENDIF();
8088 IEM_MC_ADVANCE_RIP();
8089 IEM_MC_END();
8090 return VINF_SUCCESS;
8091
8092 case IEMMODE_32BIT:
8093 IEMOP_MNEMONIC("cdq");
8094 IEM_MC_BEGIN(0, 1);
8095 IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 31) {
8096 IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xDX, UINT32_C(0xffffffff));
8097 } IEM_MC_ELSE() {
8098 IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xDX, 0);
8099 } IEM_MC_ENDIF();
8100 IEM_MC_ADVANCE_RIP();
8101 IEM_MC_END();
8102 return VINF_SUCCESS;
8103
8104 case IEMMODE_64BIT:
8105 IEMOP_MNEMONIC("cqo");
8106 IEM_MC_BEGIN(0, 1);
8107 IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 63) {
8108 IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xDX, UINT64_C(0xffffffffffffffff));
8109 } IEM_MC_ELSE() {
8110 IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xDX, 0);
8111 } IEM_MC_ENDIF();
8112 IEM_MC_ADVANCE_RIP();
8113 IEM_MC_END();
8114 return VINF_SUCCESS;
8115
8116 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8117 }
8118}
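/* Worked examples: cwd with AX=0x8000 sets DX=0xffff, with AX=0x7fff sets
   DX=0x0000; cdq with EAX=0x80000000 sets EDX=0xffffffff. The rDX register
   simply receives the sign bit of rAX replicated into every bit. */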
8119
8120
8121/** Opcode 0x9a. */
8122FNIEMOP_DEF(iemOp_call_Ap)
8123{
8124 IEMOP_MNEMONIC("call Ap");
8125 IEMOP_HLP_NO_64BIT();
8126
8127 /* Decode the far pointer address and pass it on to the far call C implementation. */
8128 uint32_t offSeg;
8129 if (pIemCpu->enmEffOpSize != IEMMODE_16BIT)
8130 IEM_OPCODE_GET_NEXT_U32(&offSeg);
8131 else
8132 IEM_OPCODE_GET_NEXT_U16_ZX_U32(&offSeg);
8133 uint16_t uSel; IEM_OPCODE_GET_NEXT_U16(&uSel);
8134 IEMOP_HLP_NO_LOCK_PREFIX();
8135 return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_callf, uSel, offSeg, pIemCpu->enmEffOpSize);
8136}
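/* Decode-order illustration: in 32-bit code, "call 0x1234:0x00405678"
   encodes as 9A 78 56 40 00 34 12 - the 32-bit offset comes first and the
   16-bit selector last, matching the offSeg-then-uSel fetch order above. */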
8137
8138
8139/** Opcode 0x9b. (aka fwait) */
8140FNIEMOP_DEF(iemOp_wait)
8141{
8142 IEMOP_MNEMONIC("wait");
8143 IEMOP_HLP_NO_LOCK_PREFIX();
8144
8145 IEM_MC_BEGIN(0, 0);
8146 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
8147 IEM_MC_MAYBE_RAISE_FPU_XCPT();
8148 IEM_MC_ADVANCE_RIP();
8149 IEM_MC_END();
8150 return VINF_SUCCESS;
8151}
8152
8153
8154/** Opcode 0x9c. */
8155FNIEMOP_DEF(iemOp_pushf_Fv)
8156{
8157 IEMOP_HLP_NO_LOCK_PREFIX();
8158 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
8159 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_pushf, pIemCpu->enmEffOpSize);
8160}
8161
8162
8163/** Opcode 0x9d. */
8164FNIEMOP_DEF(iemOp_popf_Fv)
8165{
8166 IEMOP_HLP_NO_LOCK_PREFIX();
8167 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
8168 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_popf, pIemCpu->enmEffOpSize);
8169}
8170
8171
8172/** Opcode 0x9e. */
8173FNIEMOP_DEF(iemOp_sahf)
8174{
8175 IEMOP_MNEMONIC("sahf");
8176 IEMOP_HLP_NO_LOCK_PREFIX();
8177 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
8178 && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
8179 return IEMOP_RAISE_INVALID_OPCODE();
8180 IEM_MC_BEGIN(0, 2);
8181 IEM_MC_LOCAL(uint32_t, u32Flags);
8182 IEM_MC_LOCAL(uint32_t, EFlags);
8183 IEM_MC_FETCH_EFLAGS(EFlags);
8184 IEM_MC_FETCH_GREG_U8_ZX_U32(u32Flags, X86_GREG_xSP/*=AH*/);
8185 IEM_MC_AND_LOCAL_U32(u32Flags, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
8186 IEM_MC_AND_LOCAL_U32(EFlags, UINT32_C(0xffffff00));
8187 IEM_MC_OR_LOCAL_U32(u32Flags, X86_EFL_1);
8188 IEM_MC_OR_2LOCS_U32(EFlags, u32Flags);
8189 IEM_MC_COMMIT_EFLAGS(EFlags);
8190 IEM_MC_ADVANCE_RIP();
8191 IEM_MC_END();
8192 return VINF_SUCCESS;
8193}
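/* Net effect of the MC block above, in plain C (illustrative):
   fEfl = (fEfl & UINT32_C(0xffffff00))
        | (uAh & (X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF))
        | X86_EFL_1;
   i.e. SF/ZF/AF/PF/CF come from AH, bit 1 reads as 1, bits 3 and 5 as 0. */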
8194
8195
8196/** Opcode 0x9f. */
8197FNIEMOP_DEF(iemOp_lahf)
8198{
8199 IEMOP_MNEMONIC("lahf");
8200 IEMOP_HLP_NO_LOCK_PREFIX();
8201 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
8202 && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
8203 return IEMOP_RAISE_INVALID_OPCODE();
8204 IEM_MC_BEGIN(0, 1);
8205 IEM_MC_LOCAL(uint8_t, u8Flags);
8206 IEM_MC_FETCH_EFLAGS_U8(u8Flags);
8207 IEM_MC_STORE_GREG_U8(X86_GREG_xSP/*=AH*/, u8Flags);
8208 IEM_MC_ADVANCE_RIP();
8209 IEM_MC_END();
8210 return VINF_SUCCESS;
8211}
8212
8213
8214/**
8215 * Macro used by iemOp_mov_Al_Ob, iemOp_mov_rAX_Ov, iemOp_mov_Ob_AL and
8216 * iemOp_mov_Ov_rAX to fetch the moffsXX part of the opcode and fend off lock
8217 * prefixes. Will return on failures.
8218 * @param a_GCPtrMemOff The variable to store the offset in.
8219 */
8220#define IEMOP_FETCH_MOFFS_XX(a_GCPtrMemOff) \
8221 do \
8222 { \
8223 switch (pIemCpu->enmEffAddrMode) \
8224 { \
8225 case IEMMODE_16BIT: \
8226 IEM_OPCODE_GET_NEXT_U16_ZX_U64(&(a_GCPtrMemOff)); \
8227 break; \
8228 case IEMMODE_32BIT: \
8229 IEM_OPCODE_GET_NEXT_U32_ZX_U64(&(a_GCPtrMemOff)); \
8230 break; \
8231 case IEMMODE_64BIT: \
8232 IEM_OPCODE_GET_NEXT_U64(&(a_GCPtrMemOff)); \
8233 break; \
8234 IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
8235 } \
8236 IEMOP_HLP_NO_LOCK_PREFIX(); \
8237 } while (0)
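/* Encoding example for the macro above: in 32-bit code, "mov al,[0x00401000]"
   is A0 00 10 40 00 - opcode A0 followed by a 4-byte moffs32 that lands in
   a_GCPtrMemOff; with a 0x67 address-size prefix only a 2-byte moffs16
   follows, and in 64-bit mode a full 8-byte moffs64. */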
8238
8239/** Opcode 0xa0. */
8240FNIEMOP_DEF(iemOp_mov_Al_Ob)
8241{
8242 /*
8243 * Get the offset and fend off lock prefixes.
8244 */
8245 RTGCPTR GCPtrMemOff;
8246 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
8247
8248 /*
8249 * Fetch AL.
8250 */
8251 IEM_MC_BEGIN(0,1);
8252 IEM_MC_LOCAL(uint8_t, u8Tmp);
8253 IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
8254 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
8255 IEM_MC_ADVANCE_RIP();
8256 IEM_MC_END();
8257 return VINF_SUCCESS;
8258}
8259
8260
8261/** Opcode 0xa1. */
8262FNIEMOP_DEF(iemOp_mov_rAX_Ov)
8263{
8264 /*
8265 * Get the offset and fend off lock prefixes.
8266 */
8267 IEMOP_MNEMONIC("mov rAX,Ov");
8268 RTGCPTR GCPtrMemOff;
8269 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
8270
8271 /*
8272 * Fetch rAX.
8273 */
8274 switch (pIemCpu->enmEffOpSize)
8275 {
8276 case IEMMODE_16BIT:
8277 IEM_MC_BEGIN(0,1);
8278 IEM_MC_LOCAL(uint16_t, u16Tmp);
8279 IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
8280 IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
8281 IEM_MC_ADVANCE_RIP();
8282 IEM_MC_END();
8283 return VINF_SUCCESS;
8284
8285 case IEMMODE_32BIT:
8286 IEM_MC_BEGIN(0,1);
8287 IEM_MC_LOCAL(uint32_t, u32Tmp);
8288 IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
8289 IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp);
8290 IEM_MC_ADVANCE_RIP();
8291 IEM_MC_END();
8292 return VINF_SUCCESS;
8293
8294 case IEMMODE_64BIT:
8295 IEM_MC_BEGIN(0,1);
8296 IEM_MC_LOCAL(uint64_t, u64Tmp);
8297 IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
8298 IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp);
8299 IEM_MC_ADVANCE_RIP();
8300 IEM_MC_END();
8301 return VINF_SUCCESS;
8302
8303 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8304 }
8305}
8306
8307
8308/** Opcode 0xa2. */
8309FNIEMOP_DEF(iemOp_mov_Ob_AL)
8310{
8311 /*
8312 * Get the offset and fend off lock prefixes.
8313 */
8314 RTGCPTR GCPtrMemOff;
8315 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
8316
8317 /*
8318 * Store AL.
8319 */
8320 IEM_MC_BEGIN(0,1);
8321 IEM_MC_LOCAL(uint8_t, u8Tmp);
8322 IEM_MC_FETCH_GREG_U8(u8Tmp, X86_GREG_xAX);
8323 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrMemOff, u8Tmp);
8324 IEM_MC_ADVANCE_RIP();
8325 IEM_MC_END();
8326 return VINF_SUCCESS;
8327}
8328
8329
8330/** Opcode 0xa3. */
8331FNIEMOP_DEF(iemOp_mov_Ov_rAX)
8332{
8333 /*
8334 * Get the offset and fend off lock prefixes.
8335 */
8336 RTGCPTR GCPtrMemOff;
8337 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
8338
8339 /*
8340 * Store rAX.
8341 */
8342 switch (pIemCpu->enmEffOpSize)
8343 {
8344 case IEMMODE_16BIT:
8345 IEM_MC_BEGIN(0,1);
8346 IEM_MC_LOCAL(uint16_t, u16Tmp);
8347 IEM_MC_FETCH_GREG_U16(u16Tmp, X86_GREG_xAX);
8348 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrMemOff, u16Tmp);
8349 IEM_MC_ADVANCE_RIP();
8350 IEM_MC_END();
8351 return VINF_SUCCESS;
8352
8353 case IEMMODE_32BIT:
8354 IEM_MC_BEGIN(0,1);
8355 IEM_MC_LOCAL(uint32_t, u32Tmp);
8356 IEM_MC_FETCH_GREG_U32(u32Tmp, X86_GREG_xAX);
8357 IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrMemOff, u32Tmp);
8358 IEM_MC_ADVANCE_RIP();
8359 IEM_MC_END();
8360 return VINF_SUCCESS;
8361
8362 case IEMMODE_64BIT:
8363 IEM_MC_BEGIN(0,1);
8364 IEM_MC_LOCAL(uint64_t, u64Tmp);
8365 IEM_MC_FETCH_GREG_U64(u64Tmp, X86_GREG_xAX);
8366 IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrMemOff, u64Tmp);
8367 IEM_MC_ADVANCE_RIP();
8368 IEM_MC_END();
8369 return VINF_SUCCESS;
8370
8371 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8372 }
8373}
8374
8375/** Macro used by iemOp_movsb_Xb_Yb and iemOp_movswd_Xv_Yv */
8376#define IEM_MOVS_CASE(ValBits, AddrBits) \
8377 IEM_MC_BEGIN(0, 2); \
8378 IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
8379 IEM_MC_LOCAL(RTGCPTR, uAddr); \
8380 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
8381 IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
8382 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
8383 IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
8384 IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
8385 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
8386 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
8387 } IEM_MC_ELSE() { \
8388 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
8389 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
8390 } IEM_MC_ENDIF(); \
8391 IEM_MC_ADVANCE_RIP(); \
8392 IEM_MC_END();
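/* Illustrative plain-C equivalent of IEM_MOVS_CASE(8, 16), ignoring segment
   bases and faults; iemSketchMovsb16, memRead8 and memWrite8 are hypothetical
   names, not part of IEM (not built): */
#if 0
static void iemSketchMovsb16(uint8_t iEffSeg, uint16_t *puSi, uint16_t *puDi, uint32_t fEfl)
{
    uint8_t const uValue = memRead8(iEffSeg, *puSi);   /* load from DS:SI */
    memWrite8(X86_SREG_ES, *puDi, uValue);             /* store to ES:DI */
    if (fEfl & X86_EFL_DF) { *puSi -= 1; *puDi -= 1; } /* DF set: count down */
    else                   { *puSi += 1; *puDi += 1; } /* DF clear: count up */
}
#endif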
8393
8394/** Opcode 0xa4. */
8395FNIEMOP_DEF(iemOp_movsb_Xb_Yb)
8396{
8397 IEMOP_HLP_NO_LOCK_PREFIX();
8398
8399 /*
8400 * Use the C implementation if a repeat prefix is encountered.
8401 */
8402 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
8403 {
8404 IEMOP_MNEMONIC("rep movsb Xb,Yb");
8405 switch (pIemCpu->enmEffAddrMode)
8406 {
8407 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr16, pIemCpu->iEffSeg);
8408 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr32, pIemCpu->iEffSeg);
8409 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr64, pIemCpu->iEffSeg);
8410 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8411 }
8412 }
8413 IEMOP_MNEMONIC("movsb Xb,Yb");
8414
8415 /*
8416 * Sharing case implementation with movs[wdq] below.
8417 */
8418 switch (pIemCpu->enmEffAddrMode)
8419 {
8420 case IEMMODE_16BIT: IEM_MOVS_CASE(8, 16); break;
8421 case IEMMODE_32BIT: IEM_MOVS_CASE(8, 32); break;
8422 case IEMMODE_64BIT: IEM_MOVS_CASE(8, 64); break;
8423 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8424 }
8425 return VINF_SUCCESS;
8426}
8427
8428
8429/** Opcode 0xa5. */
8430FNIEMOP_DEF(iemOp_movswd_Xv_Yv)
8431{
8432 IEMOP_HLP_NO_LOCK_PREFIX();
8433
8434 /*
8435 * Use the C implementation if a repeat prefix is encountered.
8436 */
8437 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
8438 {
8439 IEMOP_MNEMONIC("rep movs Xv,Yv");
8440 switch (pIemCpu->enmEffOpSize)
8441 {
8442 case IEMMODE_16BIT:
8443 switch (pIemCpu->enmEffAddrMode)
8444 {
8445 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr16, pIemCpu->iEffSeg);
8446 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr32, pIemCpu->iEffSeg);
8447 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr64, pIemCpu->iEffSeg);
8448 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8449 }
8450 break;
8451 case IEMMODE_32BIT:
8452 switch (pIemCpu->enmEffAddrMode)
8453 {
8454 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr16, pIemCpu->iEffSeg);
8455 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr32, pIemCpu->iEffSeg);
8456 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr64, pIemCpu->iEffSeg);
8457 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8458 }
8459 case IEMMODE_64BIT:
8460 switch (pIemCpu->enmEffAddrMode)
8461 {
8462 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
8463 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr32, pIemCpu->iEffSeg);
8464 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr64, pIemCpu->iEffSeg);
8465 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8466 }
8467 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8468 }
8469 }
8470 IEMOP_MNEMONIC("movs Xv,Yv");
8471
8472 /*
8473 * Annoying double switch here.
8474 * Using ugly macro for implementing the cases, sharing it with movsb.
8475 */
8476 switch (pIemCpu->enmEffOpSize)
8477 {
8478 case IEMMODE_16BIT:
8479 switch (pIemCpu->enmEffAddrMode)
8480 {
8481 case IEMMODE_16BIT: IEM_MOVS_CASE(16, 16); break;
8482 case IEMMODE_32BIT: IEM_MOVS_CASE(16, 32); break;
8483 case IEMMODE_64BIT: IEM_MOVS_CASE(16, 64); break;
8484 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8485 }
8486 break;
8487
8488 case IEMMODE_32BIT:
8489 switch (pIemCpu->enmEffAddrMode)
8490 {
8491 case IEMMODE_16BIT: IEM_MOVS_CASE(32, 16); break;
8492 case IEMMODE_32BIT: IEM_MOVS_CASE(32, 32); break;
8493 case IEMMODE_64BIT: IEM_MOVS_CASE(32, 64); break;
8494 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8495 }
8496 break;
8497
8498 case IEMMODE_64BIT:
8499 switch (pIemCpu->enmEffAddrMode)
8500 {
8501 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
8502 case IEMMODE_32BIT: IEM_MOVS_CASE(64, 32); break;
8503 case IEMMODE_64BIT: IEM_MOVS_CASE(64, 64); break;
8504 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8505 }
8506 break;
8507 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8508 }
8509 return VINF_SUCCESS;
8510}
8511
8512#undef IEM_MOVS_CASE
8513
8514/** Macro used by iemOp_cmpsb_Xb_Yb and iemOp_cmpswd_Xv_Yv */
8515#define IEM_CMPS_CASE(ValBits, AddrBits) \
8516 IEM_MC_BEGIN(3, 3); \
8517 IEM_MC_ARG(uint##ValBits##_t *, puValue1, 0); \
8518 IEM_MC_ARG(uint##ValBits##_t, uValue2, 1); \
8519 IEM_MC_ARG(uint32_t *, pEFlags, 2); \
8520 IEM_MC_LOCAL(uint##ValBits##_t, uValue1); \
8521 IEM_MC_LOCAL(RTGCPTR, uAddr); \
8522 \
8523 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
8524 IEM_MC_FETCH_MEM_U##ValBits(uValue1, pIemCpu->iEffSeg, uAddr); \
8525 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
8526 IEM_MC_FETCH_MEM_U##ValBits(uValue2, X86_SREG_ES, uAddr); \
8527 IEM_MC_REF_LOCAL(puValue1, uValue1); \
8528 IEM_MC_REF_EFLAGS(pEFlags); \
8529 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puValue1, uValue2, pEFlags); \
8530 \
8531 IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
8532 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
8533 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
8534 } IEM_MC_ELSE() { \
8535 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
8536 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
8537 } IEM_MC_ENDIF(); \
8538 IEM_MC_ADVANCE_RIP(); \
8539 IEM_MC_END(); \
8540
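/* Operand order note for IEM_CMPS_CASE: the comparison is uValue1 - uValue2,
   i.e. DS:rSI minus ES:rDI, with the flags set exactly as cmp would set them.
   E.g. 0x41 ('A') at DS:SI versus 0x42 ('B') at ES:DI sets CF and SF. */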
8541/** Opcode 0xa6. */
8542FNIEMOP_DEF(iemOp_cmpsb_Xb_Yb)
8543{
8544 IEMOP_HLP_NO_LOCK_PREFIX();
8545
8546 /*
8547 * Use the C implementation if a repeat prefix is encountered.
8548 */
8549 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
8550 {
8551 IEMOP_MNEMONIC("repe cmps Xb,Yb");
8552 switch (pIemCpu->enmEffAddrMode)
8553 {
8554 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr16, pIemCpu->iEffSeg);
8555 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr32, pIemCpu->iEffSeg);
8556 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr64, pIemCpu->iEffSeg);
8557 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8558 }
8559 }
8560 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
8561 {
8562 IEMOP_MNEMONIC("repe cmps Xb,Yb");
8563 switch (pIemCpu->enmEffAddrMode)
8564 {
8565 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr16, pIemCpu->iEffSeg);
8566 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr32, pIemCpu->iEffSeg);
8567 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr64, pIemCpu->iEffSeg);
8568 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8569 }
8570 }
8571 IEMOP_MNEMONIC("cmps Xb,Yb");
8572
8573 /*
8574 * Sharing case implementation with cmps[wdq] below.
8575 */
8576 switch (pIemCpu->enmEffAddrMode)
8577 {
8578 case IEMMODE_16BIT: IEM_CMPS_CASE(8, 16); break;
8579 case IEMMODE_32BIT: IEM_CMPS_CASE(8, 32); break;
8580 case IEMMODE_64BIT: IEM_CMPS_CASE(8, 64); break;
8581 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8582 }
8583 return VINF_SUCCESS;
8585}
8586
8587
8588/** Opcode 0xa7. */
8589FNIEMOP_DEF(iemOp_cmpswd_Xv_Yv)
8590{
8591 IEMOP_HLP_NO_LOCK_PREFIX();
8592
8593 /*
8594 * Use the C implementation if a repeat prefix is encountered.
8595 */
8596 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
8597 {
8598 IEMOP_MNEMONIC("repe cmps Xv,Yv");
8599 switch (pIemCpu->enmEffOpSize)
8600 {
8601 case IEMMODE_16BIT:
8602 switch (pIemCpu->enmEffAddrMode)
8603 {
8604 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr16, pIemCpu->iEffSeg);
8605 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr32, pIemCpu->iEffSeg);
8606 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr64, pIemCpu->iEffSeg);
8607 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8608 }
8609 break;
8610 case IEMMODE_32BIT:
8611 switch (pIemCpu->enmEffAddrMode)
8612 {
8613 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr16, pIemCpu->iEffSeg);
8614 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr32, pIemCpu->iEffSeg);
8615 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr64, pIemCpu->iEffSeg);
8616 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8617 }
8618 case IEMMODE_64BIT:
8619 switch (pIemCpu->enmEffAddrMode)
8620 {
8621 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
8622 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr32, pIemCpu->iEffSeg);
8623 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr64, pIemCpu->iEffSeg);
8624 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8625 }
8626 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8627 }
8628 }
8629
8630 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
8631 {
8632 IEMOP_MNEMONIC("repne cmps Xv,Yv");
8633 switch (pIemCpu->enmEffOpSize)
8634 {
8635 case IEMMODE_16BIT:
8636 switch (pIemCpu->enmEffAddrMode)
8637 {
8638 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr16, pIemCpu->iEffSeg);
8639 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr32, pIemCpu->iEffSeg);
8640 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr64, pIemCpu->iEffSeg);
8641 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8642 }
8643 break;
8644 case IEMMODE_32BIT:
8645 switch (pIemCpu->enmEffAddrMode)
8646 {
8647 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr16, pIemCpu->iEffSeg);
8648 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr32, pIemCpu->iEffSeg);
8649 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr64, pIemCpu->iEffSeg);
8650 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8651 }
8652 case IEMMODE_64BIT:
8653 switch (pIemCpu->enmEffAddrMode)
8654 {
8655 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
8656 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr32, pIemCpu->iEffSeg);
8657 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr64, pIemCpu->iEffSeg);
8658 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8659 }
8660 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8661 }
8662 }
8663
8664 IEMOP_MNEMONIC("cmps Xv,Yv");
8665
8666 /*
8667 * Annoying double switch here.
8668 * Using ugly macro for implementing the cases, sharing it with cmpsb.
8669 */
8670 switch (pIemCpu->enmEffOpSize)
8671 {
8672 case IEMMODE_16BIT:
8673 switch (pIemCpu->enmEffAddrMode)
8674 {
8675 case IEMMODE_16BIT: IEM_CMPS_CASE(16, 16); break;
8676 case IEMMODE_32BIT: IEM_CMPS_CASE(16, 32); break;
8677 case IEMMODE_64BIT: IEM_CMPS_CASE(16, 64); break;
8678 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8679 }
8680 break;
8681
8682 case IEMMODE_32BIT:
8683 switch (pIemCpu->enmEffAddrMode)
8684 {
8685 case IEMMODE_16BIT: IEM_CMPS_CASE(32, 16); break;
8686 case IEMMODE_32BIT: IEM_CMPS_CASE(32, 32); break;
8687 case IEMMODE_64BIT: IEM_CMPS_CASE(32, 64); break;
8688 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8689 }
8690 break;
8691
8692 case IEMMODE_64BIT:
8693 switch (pIemCpu->enmEffAddrMode)
8694 {
8695 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
8696 case IEMMODE_32BIT: IEM_CMPS_CASE(64, 32); break;
8697 case IEMMODE_64BIT: IEM_CMPS_CASE(64, 64); break;
8698 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8699 }
8700 break;
8701 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8702 }
8703 return VINF_SUCCESS;
8705}
8706
8707#undef IEM_CMPS_CASE
8708
8709/** Opcode 0xa8. */
8710FNIEMOP_DEF(iemOp_test_AL_Ib)
8711{
8712 IEMOP_MNEMONIC("test al,Ib");
8713 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
8714 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_test);
8715}
8716
8717
8718/** Opcode 0xa9. */
8719FNIEMOP_DEF(iemOp_test_eAX_Iz)
8720{
8721 IEMOP_MNEMONIC("test rAX,Iz");
8722 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
8723 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_test);
8724}
8725
8726
8727/** Macro used by iemOp_stosb_Yb_AL and iemOp_stoswd_Yv_eAX */
8728#define IEM_STOS_CASE(ValBits, AddrBits) \
8729 IEM_MC_BEGIN(0, 2); \
8730 IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
8731 IEM_MC_LOCAL(RTGCPTR, uAddr); \
8732 IEM_MC_FETCH_GREG_U##ValBits(uValue, X86_GREG_xAX); \
8733 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
8734 IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
8735 IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
8736 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
8737 } IEM_MC_ELSE() { \
8738 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
8739 } IEM_MC_ENDIF(); \
8740 IEM_MC_ADVANCE_RIP(); \
8741 IEM_MC_END(); \
8742
8743/** Opcode 0xaa. */
8744FNIEMOP_DEF(iemOp_stosb_Yb_AL)
8745{
8746 IEMOP_HLP_NO_LOCK_PREFIX();
8747
8748 /*
8749 * Use the C implementation if a repeat prefix is encountered.
8750 */
8751 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
8752 {
8753 IEMOP_MNEMONIC("rep stos Yb,al");
8754 switch (pIemCpu->enmEffAddrMode)
8755 {
8756 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m16);
8757 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m32);
8758 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m64);
8759 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8760 }
8761 }
8762 IEMOP_MNEMONIC("stos Yb,al");
8763
8764 /*
8765 * Sharing case implementation with stos[wdq] below.
8766 */
8767 switch (pIemCpu->enmEffAddrMode)
8768 {
8769 case IEMMODE_16BIT: IEM_STOS_CASE(8, 16); break;
8770 case IEMMODE_32BIT: IEM_STOS_CASE(8, 32); break;
8771 case IEMMODE_64BIT: IEM_STOS_CASE(8, 64); break;
8772 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8773 }
8774 return VINF_SUCCESS;
8775}
8776
8777
8778/** Opcode 0xab. */
8779FNIEMOP_DEF(iemOp_stoswd_Yv_eAX)
8780{
8781 IEMOP_HLP_NO_LOCK_PREFIX();
8782
8783 /*
8784 * Use the C implementation if a repeat prefix is encountered.
8785 */
8786 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
8787 {
8788 IEMOP_MNEMONIC("rep stos Yv,rAX");
8789 switch (pIemCpu->enmEffOpSize)
8790 {
8791 case IEMMODE_16BIT:
8792 switch (pIemCpu->enmEffAddrMode)
8793 {
8794 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m16);
8795 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m32);
8796 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m64);
8797 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8798 }
8799 break;
8800 case IEMMODE_32BIT:
8801 switch (pIemCpu->enmEffAddrMode)
8802 {
8803 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m16);
8804 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m32);
8805 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m64);
8806 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8807 }
8808 case IEMMODE_64BIT:
8809 switch (pIemCpu->enmEffAddrMode)
8810 {
8811 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
8812 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m32);
8813 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m64);
8814 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8815 }
8816 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8817 }
8818 }
8819 IEMOP_MNEMONIC("stos Yv,rAX");
8820
8821 /*
8822 * Annoying double switch here.
8823 * Using ugly macro for implementing the cases, sharing it with stosb.
8824 */
8825 switch (pIemCpu->enmEffOpSize)
8826 {
8827 case IEMMODE_16BIT:
8828 switch (pIemCpu->enmEffAddrMode)
8829 {
8830 case IEMMODE_16BIT: IEM_STOS_CASE(16, 16); break;
8831 case IEMMODE_32BIT: IEM_STOS_CASE(16, 32); break;
8832 case IEMMODE_64BIT: IEM_STOS_CASE(16, 64); break;
8833 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8834 }
8835 break;
8836
8837 case IEMMODE_32BIT:
8838 switch (pIemCpu->enmEffAddrMode)
8839 {
8840 case IEMMODE_16BIT: IEM_STOS_CASE(32, 16); break;
8841 case IEMMODE_32BIT: IEM_STOS_CASE(32, 32); break;
8842 case IEMMODE_64BIT: IEM_STOS_CASE(32, 64); break;
8843 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8844 }
8845 break;
8846
8847 case IEMMODE_64BIT:
8848 switch (pIemCpu->enmEffAddrMode)
8849 {
8850 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
8851 case IEMMODE_32BIT: IEM_STOS_CASE(64, 32); break;
8852 case IEMMODE_64BIT: IEM_STOS_CASE(64, 64); break;
8853 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8854 }
8855 break;
8856 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8857 }
8858 return VINF_SUCCESS;
8859}
8860
8861#undef IEM_STOS_CASE
8862
8863/** Macro used by iemOp_lodsb_AL_Xb and iemOp_lodswd_eAX_Xv */
8864#define IEM_LODS_CASE(ValBits, AddrBits) \
8865 IEM_MC_BEGIN(0, 2); \
8866 IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
8867 IEM_MC_LOCAL(RTGCPTR, uAddr); \
8868 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
8869 IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
8870 IEM_MC_STORE_GREG_U##ValBits(X86_GREG_xAX, uValue); \
8871 IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
8872 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
8873 } IEM_MC_ELSE() { \
8874 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
8875 } IEM_MC_ENDIF(); \
8876 IEM_MC_ADVANCE_RIP(); \
8877 IEM_MC_END();
8878
8879/** Opcode 0xac. */
8880FNIEMOP_DEF(iemOp_lodsb_AL_Xb)
8881{
8882 IEMOP_HLP_NO_LOCK_PREFIX();
8883
8884 /*
8885 * Use the C implementation if a repeat prefix is encountered.
8886 */
8887 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
8888 {
8889 IEMOP_MNEMONIC("rep lodsb al,Xb");
8890 switch (pIemCpu->enmEffAddrMode)
8891 {
8892 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m16, pIemCpu->iEffSeg);
8893 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m32, pIemCpu->iEffSeg);
8894 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m64, pIemCpu->iEffSeg);
8895 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8896 }
8897 }
8898 IEMOP_MNEMONIC("lodsb al,Xb");
8899
8900 /*
8901 * Sharing case implementation with lods[wdq] below.
8902 */
8903 switch (pIemCpu->enmEffAddrMode)
8904 {
8905 case IEMMODE_16BIT: IEM_LODS_CASE(8, 16); break;
8906 case IEMMODE_32BIT: IEM_LODS_CASE(8, 32); break;
8907 case IEMMODE_64BIT: IEM_LODS_CASE(8, 64); break;
8908 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8909 }
8910 return VINF_SUCCESS;
8911}
8912
8913
8914/** Opcode 0xad. */
8915FNIEMOP_DEF(iemOp_lodswd_eAX_Xv)
8916{
8917 IEMOP_HLP_NO_LOCK_PREFIX();
8918
8919 /*
8920 * Use the C implementation if a repeat prefix is encountered.
8921 */
8922 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
8923 {
8924 IEMOP_MNEMONIC("rep lods rAX,Xv");
8925 switch (pIemCpu->enmEffOpSize)
8926 {
8927 case IEMMODE_16BIT:
8928 switch (pIemCpu->enmEffAddrMode)
8929 {
8930 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m16, pIemCpu->iEffSeg);
8931 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m32, pIemCpu->iEffSeg);
8932 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m64, pIemCpu->iEffSeg);
8933 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8934 }
8935 break;
8936 case IEMMODE_32BIT:
8937 switch (pIemCpu->enmEffAddrMode)
8938 {
8939 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m16, pIemCpu->iEffSeg);
8940 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m32, pIemCpu->iEffSeg);
8941 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m64, pIemCpu->iEffSeg);
8942 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8943 }
8944 case IEMMODE_64BIT:
8945 switch (pIemCpu->enmEffAddrMode)
8946 {
8947 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
8948 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m32, pIemCpu->iEffSeg);
8949 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m64, pIemCpu->iEffSeg);
8950 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8951 }
8952 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8953 }
8954 }
8955 IEMOP_MNEMONIC("lods rAX,Xv");
8956
8957 /*
8958 * Annoying double switch here.
8959 * Using ugly macro for implementing the cases, sharing it with lodsb.
8960 */
8961 switch (pIemCpu->enmEffOpSize)
8962 {
8963 case IEMMODE_16BIT:
8964 switch (pIemCpu->enmEffAddrMode)
8965 {
8966 case IEMMODE_16BIT: IEM_LODS_CASE(16, 16); break;
8967 case IEMMODE_32BIT: IEM_LODS_CASE(16, 32); break;
8968 case IEMMODE_64BIT: IEM_LODS_CASE(16, 64); break;
8969 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8970 }
8971 break;
8972
8973 case IEMMODE_32BIT:
8974 switch (pIemCpu->enmEffAddrMode)
8975 {
8976 case IEMMODE_16BIT: IEM_LODS_CASE(32, 16); break;
8977 case IEMMODE_32BIT: IEM_LODS_CASE(32, 32); break;
8978 case IEMMODE_64BIT: IEM_LODS_CASE(32, 64); break;
8979 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8980 }
8981 break;
8982
8983 case IEMMODE_64BIT:
8984 switch (pIemCpu->enmEffAddrMode)
8985 {
8986 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
8987 case IEMMODE_32BIT: IEM_LODS_CASE(64, 32); break;
8988 case IEMMODE_64BIT: IEM_LODS_CASE(64, 64); break;
8989 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8990 }
8991 break;
8992 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8993 }
8994 return VINF_SUCCESS;
8995}
8996
8997#undef IEM_LODS_CASE
8998
8999/** Macro used by iemOp_scasb_AL_Xb and iemOp_scaswd_eAX_Xv */
9000#define IEM_SCAS_CASE(ValBits, AddrBits) \
9001 IEM_MC_BEGIN(3, 2); \
9002 IEM_MC_ARG(uint##ValBits##_t *, puRax, 0); \
9003 IEM_MC_ARG(uint##ValBits##_t, uValue, 1); \
9004 IEM_MC_ARG(uint32_t *, pEFlags, 2); \
9005 IEM_MC_LOCAL(RTGCPTR, uAddr); \
9006 \
9007 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
9008 IEM_MC_FETCH_MEM_U##ValBits(uValue, X86_SREG_ES, uAddr); \
9009 IEM_MC_REF_GREG_U##ValBits(puRax, X86_GREG_xAX); \
9010 IEM_MC_REF_EFLAGS(pEFlags); \
9011 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puRax, uValue, pEFlags); \
9012 \
9013 IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
9014 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
9015 } IEM_MC_ELSE() { \
9016 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
9017 } IEM_MC_ENDIF(); \
9018 IEM_MC_ADVANCE_RIP(); \
9019 IEM_MC_END();
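/* Operand order note for IEM_SCAS_CASE: the comparison is rAX minus ES:rDI,
   again with cmp semantics. Under a repeat prefix the loop termination
   differs: repe stops once ZF is clear (mismatch found), repne stops once ZF
   is set (match found) - hence the separate repe/repne dispatch above. */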
9020
9021/** Opcode 0xae. */
9022FNIEMOP_DEF(iemOp_scasb_AL_Xb)
9023{
9024 IEMOP_HLP_NO_LOCK_PREFIX();
9025
9026 /*
9027 * Use the C implementation if a repeat prefix is encountered.
9028 */
9029 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
9030 {
9031 IEMOP_MNEMONIC("repe scasb al,Xb");
9032 switch (pIemCpu->enmEffAddrMode)
9033 {
9034 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m16);
9035 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m32);
9036 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m64);
9037 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9038 }
9039 }
9040 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
9041 {
9042 IEMOP_MNEMONIC("repne scasb al,Xb");
9043 switch (pIemCpu->enmEffAddrMode)
9044 {
9045 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m16);
9046 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m32);
9047 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m64);
9048 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9049 }
9050 }
9051 IEMOP_MNEMONIC("scasb al,Xb");
9052
9053 /*
9054 * Sharing case implementation with scas[wdq] below.
9055 */
9056 switch (pIemCpu->enmEffAddrMode)
9057 {
9058 case IEMMODE_16BIT: IEM_SCAS_CASE(8, 16); break;
9059 case IEMMODE_32BIT: IEM_SCAS_CASE(8, 32); break;
9060 case IEMMODE_64BIT: IEM_SCAS_CASE(8, 64); break;
9061 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9062 }
9063 return VINF_SUCCESS;
9064}
9065
9066
9067/** Opcode 0xaf. */
9068FNIEMOP_DEF(iemOp_scaswd_eAX_Xv)
9069{
9070 IEMOP_HLP_NO_LOCK_PREFIX();
9071
9072 /*
9073 * Use the C implementation if a repeat prefix is encountered.
9074 */
9075 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
9076 {
9077 IEMOP_MNEMONIC("repe scas rAX,Xv");
9078 switch (pIemCpu->enmEffOpSize)
9079 {
9080 case IEMMODE_16BIT:
9081 switch (pIemCpu->enmEffAddrMode)
9082 {
9083 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m16);
9084 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m32);
9085 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m64);
9086 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9087 }
9088 break;
9089 case IEMMODE_32BIT:
9090 switch (pIemCpu->enmEffAddrMode)
9091 {
9092 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m16);
9093 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m32);
9094 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m64);
9095 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9096 }
9097 case IEMMODE_64BIT:
9098 switch (pIemCpu->enmEffAddrMode)
9099 {
9100                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /* Cannot be encoded: 64-bit mode allows 32-bit addressing (via the 0x67 prefix) but not 16-bit. */
9101 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m32);
9102 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m64);
9103 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9104 }
9105 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9106 }
9107 }
9108 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
9109 {
9110 IEMOP_MNEMONIC("repne scas rAX,Xv");
9111 switch (pIemCpu->enmEffOpSize)
9112 {
9113 case IEMMODE_16BIT:
9114 switch (pIemCpu->enmEffAddrMode)
9115 {
9116                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m16);
9117                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m32);
9118                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m64);
9119 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9120 }
9121 break;
9122 case IEMMODE_32BIT:
9123 switch (pIemCpu->enmEffAddrMode)
9124 {
9125                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m16);
9126                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m32);
9127                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m64);
9128 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9129 }
9130 case IEMMODE_64BIT:
9131 switch (pIemCpu->enmEffAddrMode)
9132 {
9133                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /* cannot be encoded */
9134                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_rax_m32);
9135                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_rax_m64);
9136 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9137 }
9138 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9139 }
9140 }
9141 IEMOP_MNEMONIC("scas rAX,Xv");
9142
9143 /*
9144 * Annoying double switch here.
9145 * Using ugly macro for implementing the cases, sharing it with scasb.
9146 */
9147 switch (pIemCpu->enmEffOpSize)
9148 {
9149 case IEMMODE_16BIT:
9150 switch (pIemCpu->enmEffAddrMode)
9151 {
9152 case IEMMODE_16BIT: IEM_SCAS_CASE(16, 16); break;
9153 case IEMMODE_32BIT: IEM_SCAS_CASE(16, 32); break;
9154 case IEMMODE_64BIT: IEM_SCAS_CASE(16, 64); break;
9155 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9156 }
9157 break;
9158
9159 case IEMMODE_32BIT:
9160 switch (pIemCpu->enmEffAddrMode)
9161 {
9162 case IEMMODE_16BIT: IEM_SCAS_CASE(32, 16); break;
9163 case IEMMODE_32BIT: IEM_SCAS_CASE(32, 32); break;
9164 case IEMMODE_64BIT: IEM_SCAS_CASE(32, 64); break;
9165 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9166 }
9167 break;
9168
9169 case IEMMODE_64BIT:
9170 switch (pIemCpu->enmEffAddrMode)
9171 {
9172 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
9173 case IEMMODE_32BIT: IEM_SCAS_CASE(64, 32); break;
9174 case IEMMODE_64BIT: IEM_SCAS_CASE(64, 64); break;
9175 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9176 }
9177 break;
9178 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9179 }
9180 return VINF_SUCCESS;
9181}
9182
9183#undef IEM_SCAS_CASE
9184
9185/**
9186 * Common 'mov r8, imm8' helper.
9187 */
9188FNIEMOP_DEF_1(iemOpCommonMov_r8_Ib, uint8_t, iReg)
9189{
9190 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9191 IEMOP_HLP_NO_LOCK_PREFIX();
9192
9193 IEM_MC_BEGIN(0, 1);
9194 IEM_MC_LOCAL_CONST(uint8_t, u8Value,/*=*/ u8Imm);
9195 IEM_MC_STORE_GREG_U8(iReg, u8Value);
9196 IEM_MC_ADVANCE_RIP();
9197 IEM_MC_END();
9198
9199 return VINF_SUCCESS;
9200}
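/*
 * Note (informative): opcodes 0xb0..0xb7 encode the destination register in
 * the low three bits of the opcode byte itself, e.g.
 *
 *      b3 7f           ; mov bl, 0x7f   (0xb0 + 3, register index 3 = BL)
 *
 * The 0xb4..0xb7 handlers below pass X86_GREG_xSP..X86_GREG_xDI because the
 * IEM U8 register accessors map indexes 4..7 onto AH..BH when no REX prefix
 * is present (and onto SPL..DIL when one is). REX.B extends the index to
 * pick R8B..R15B.
 */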
9201
9202
9203/** Opcode 0xb0. */
9204FNIEMOP_DEF(iemOp_mov_AL_Ib)
9205{
9206 IEMOP_MNEMONIC("mov AL,Ib");
9207    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xAX | pIemCpu->uRexB);
9208}
9209
9210
9211/** Opcode 0xb1. */
9212FNIEMOP_DEF(iemOp_CL_Ib)
9213{
9214 IEMOP_MNEMONIC("mov CL,Ib");
9215    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xCX | pIemCpu->uRexB);
9216}
9217
9218
9219/** Opcode 0xb2. */
9220FNIEMOP_DEF(iemOp_DL_Ib)
9221{
9222 IEMOP_MNEMONIC("mov DL,Ib");
9223    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDX | pIemCpu->uRexB);
9224}
9225
9226
9227/** Opcode 0xb3. */
9228FNIEMOP_DEF(iemOp_BL_Ib)
9229{
9230 IEMOP_MNEMONIC("mov BL,Ib");
9231    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBX | pIemCpu->uRexB);
9232}
9233
9234
9235/** Opcode 0xb4. */
9236FNIEMOP_DEF(iemOp_mov_AH_Ib)
9237{
9238 IEMOP_MNEMONIC("mov AH,Ib");
9239    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSP | pIemCpu->uRexB);
9240}
9241
9242
9243/** Opcode 0xb5. */
9244FNIEMOP_DEF(iemOp_CH_Ib)
9245{
9246 IEMOP_MNEMONIC("mov CH,Ib");
9247    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBP | pIemCpu->uRexB);
9248}
9249
9250
9251/** Opcode 0xb6. */
9252FNIEMOP_DEF(iemOp_DH_Ib)
9253{
9254 IEMOP_MNEMONIC("mov DH,Ib");
9255    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSI | pIemCpu->uRexB);
9256}
9257
9258
9259/** Opcode 0xb7. */
9260FNIEMOP_DEF(iemOp_BH_Ib)
9261{
9262 IEMOP_MNEMONIC("mov BH,Ib");
9263    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDI | pIemCpu->uRexB);
9264}
9265
9266
9267/**
9268 * Common 'mov regX,immX' helper.
9269 */
9270FNIEMOP_DEF_1(iemOpCommonMov_Rv_Iv, uint8_t, iReg)
9271{
9272 switch (pIemCpu->enmEffOpSize)
9273 {
9274 case IEMMODE_16BIT:
9275 {
9276 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9277 IEMOP_HLP_NO_LOCK_PREFIX();
9278
9279 IEM_MC_BEGIN(0, 1);
9280 IEM_MC_LOCAL_CONST(uint16_t, u16Value,/*=*/ u16Imm);
9281 IEM_MC_STORE_GREG_U16(iReg, u16Value);
9282 IEM_MC_ADVANCE_RIP();
9283 IEM_MC_END();
9284 break;
9285 }
9286
9287 case IEMMODE_32BIT:
9288 {
9289 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
9290 IEMOP_HLP_NO_LOCK_PREFIX();
9291
9292 IEM_MC_BEGIN(0, 1);
9293 IEM_MC_LOCAL_CONST(uint32_t, u32Value,/*=*/ u32Imm);
9294 IEM_MC_STORE_GREG_U32(iReg, u32Value);
9295 IEM_MC_ADVANCE_RIP();
9296 IEM_MC_END();
9297 break;
9298 }
9299 case IEMMODE_64BIT:
9300 {
9301 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm);
9302 IEMOP_HLP_NO_LOCK_PREFIX();
9303
9304 IEM_MC_BEGIN(0, 1);
9305 IEM_MC_LOCAL_CONST(uint64_t, u64Value,/*=*/ u64Imm);
9306 IEM_MC_STORE_GREG_U64(iReg, u64Value);
9307 IEM_MC_ADVANCE_RIP();
9308 IEM_MC_END();
9309 break;
9310 }
9311 }
9312
9313 return VINF_SUCCESS;
9314}
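/*
 * Note (informative): this is the one instruction form that takes a full
 * 64-bit immediate. With REX.W the encoding is e.g.
 *
 *      48 b8 88 77 66 55 44 33 22 11   ; mov rax, 0x1122334455667788
 *
 * while a 0x66 prefix shrinks the immediate to 16 bits, and the default
 * 32-bit operand size in long mode takes an imm32 which the U32 store above
 * implicitly zero-extends to the full 64-bit register.
 */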
9315
9316
9317/** Opcode 0xb8. */
9318FNIEMOP_DEF(iemOp_eAX_Iv)
9319{
9320    IEMOP_MNEMONIC("mov rAX,Iv");
9321    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xAX | pIemCpu->uRexB);
9322}
9323
9324
9325/** Opcode 0xb9. */
9326FNIEMOP_DEF(iemOp_eCX_Iv)
9327{
9328    IEMOP_MNEMONIC("mov rCX,Iv");
9329    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xCX | pIemCpu->uRexB);
9330}
9331
9332
9333/** Opcode 0xba. */
9334FNIEMOP_DEF(iemOp_eDX_Iv)
9335{
9336    IEMOP_MNEMONIC("mov rDX,Iv");
9337    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDX | pIemCpu->uRexB);
9338}
9339
9340
9341/** Opcode 0xbb. */
9342FNIEMOP_DEF(iemOp_eBX_Iv)
9343{
9344    IEMOP_MNEMONIC("mov rBX,Iv");
9345    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBX | pIemCpu->uRexB);
9346}
9347
9348
9349/** Opcode 0xbc. */
9350FNIEMOP_DEF(iemOp_eSP_Iv)
9351{
9352    IEMOP_MNEMONIC("mov rSP,Iv");
9353    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSP | pIemCpu->uRexB);
9354}
9355
9356
9357/** Opcode 0xbd. */
9358FNIEMOP_DEF(iemOp_eBP_Iv)
9359{
9360    IEMOP_MNEMONIC("mov rBP,Iv");
9361    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBP | pIemCpu->uRexB);
9362}
9363
9364
9365/** Opcode 0xbe. */
9366FNIEMOP_DEF(iemOp_eSI_Iv)
9367{
9368    IEMOP_MNEMONIC("mov rSI,Iv");
9369    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSI | pIemCpu->uRexB);
9370}
9371
9372
9373/** Opcode 0xbf. */
9374FNIEMOP_DEF(iemOp_eDI_Iv)
9375{
9376    IEMOP_MNEMONIC("mov rDI,Iv");
9377    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDI | pIemCpu->uRexB);
9378}
9379
9380
9381/** Opcode 0xc0. */
9382FNIEMOP_DEF(iemOp_Grp2_Eb_Ib)
9383{
9384 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9385 PCIEMOPSHIFTSIZES pImpl;
9386 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9387 {
9388 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,Ib"); break;
9389 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,Ib"); break;
9390 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,Ib"); break;
9391 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,Ib"); break;
9392 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,Ib"); break;
9393 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,Ib"); break;
9394 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,Ib"); break;
9395        case 6: return IEMOP_RAISE_INVALID_OPCODE();
9396 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
9397 }
9398 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9399
9400 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9401 {
9402 /* register */
9403 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9404 IEMOP_HLP_NO_LOCK_PREFIX();
9405 IEM_MC_BEGIN(3, 0);
9406 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9407 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
9408 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9409 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9410 IEM_MC_REF_EFLAGS(pEFlags);
9411 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9412 IEM_MC_ADVANCE_RIP();
9413 IEM_MC_END();
9414 }
9415 else
9416 {
9417 /* memory */
9418 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9419 IEM_MC_BEGIN(3, 2);
9420 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9421 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9422 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9423 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9424
9425 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9426 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9427 IEM_MC_ASSIGN(cShiftArg, cShift);
9428 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9429 IEM_MC_FETCH_EFLAGS(EFlags);
9430 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9431
9432 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
9433 IEM_MC_COMMIT_EFLAGS(EFlags);
9434 IEM_MC_ADVANCE_RIP();
9435 IEM_MC_END();
9436 }
9437 return VINF_SUCCESS;
9438}
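/*
 * Note (informative decode example): in group 2 the ModR/M reg field selects
 * the operation instead of naming a register, so
 *
 *      c0 e0 03        ; shl al, 3
 *
 * decodes as mod=11b, reg=100b (shl) and rm=000b (AL), followed by the imm8
 * shift count fetched above.
 */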
9439
9440
9441/** Opcode 0xc1. */
9442FNIEMOP_DEF(iemOp_Grp2_Ev_Ib)
9443{
9444 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9445 PCIEMOPSHIFTSIZES pImpl;
9446 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9447 {
9448 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,Ib"); break;
9449 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,Ib"); break;
9450 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,Ib"); break;
9451 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,Ib"); break;
9452 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,Ib"); break;
9453 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,Ib"); break;
9454 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,Ib"); break;
9455        case 6: return IEMOP_RAISE_INVALID_OPCODE();
9456 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
9457 }
9458 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9459
9460 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9461 {
9462 /* register */
9463 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9464 IEMOP_HLP_NO_LOCK_PREFIX();
9465 switch (pIemCpu->enmEffOpSize)
9466 {
9467 case IEMMODE_16BIT:
9468 IEM_MC_BEGIN(3, 0);
9469 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9470 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
9471 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9472 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9473 IEM_MC_REF_EFLAGS(pEFlags);
9474 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9475 IEM_MC_ADVANCE_RIP();
9476 IEM_MC_END();
9477 return VINF_SUCCESS;
9478
9479 case IEMMODE_32BIT:
9480 IEM_MC_BEGIN(3, 0);
9481 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9482 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
9483 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9484 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9485 IEM_MC_REF_EFLAGS(pEFlags);
9486 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9487 IEM_MC_ADVANCE_RIP();
9488 IEM_MC_END();
9489 return VINF_SUCCESS;
9490
9491 case IEMMODE_64BIT:
9492 IEM_MC_BEGIN(3, 0);
9493 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9494 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
9495 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9496 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9497 IEM_MC_REF_EFLAGS(pEFlags);
9498 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9499 IEM_MC_ADVANCE_RIP();
9500 IEM_MC_END();
9501 return VINF_SUCCESS;
9502
9503 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9504 }
9505 }
9506 else
9507 {
9508 /* memory */
9509 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9510 switch (pIemCpu->enmEffOpSize)
9511 {
9512 case IEMMODE_16BIT:
9513 IEM_MC_BEGIN(3, 2);
9514 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9515 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9516 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9517 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9518
9519 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9520 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9521 IEM_MC_ASSIGN(cShiftArg, cShift);
9522 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9523 IEM_MC_FETCH_EFLAGS(EFlags);
9524 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9525
9526 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
9527 IEM_MC_COMMIT_EFLAGS(EFlags);
9528 IEM_MC_ADVANCE_RIP();
9529 IEM_MC_END();
9530 return VINF_SUCCESS;
9531
9532 case IEMMODE_32BIT:
9533 IEM_MC_BEGIN(3, 2);
9534 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9535 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9536 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9537 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9538
9539 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9540 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9541 IEM_MC_ASSIGN(cShiftArg, cShift);
9542 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9543 IEM_MC_FETCH_EFLAGS(EFlags);
9544 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9545
9546 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
9547 IEM_MC_COMMIT_EFLAGS(EFlags);
9548 IEM_MC_ADVANCE_RIP();
9549 IEM_MC_END();
9550 return VINF_SUCCESS;
9551
9552 case IEMMODE_64BIT:
9553 IEM_MC_BEGIN(3, 2);
9554 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9555 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9556 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9557 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9558
9559 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9560 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9561 IEM_MC_ASSIGN(cShiftArg, cShift);
9562 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9563 IEM_MC_FETCH_EFLAGS(EFlags);
9564 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9565
9566 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
9567 IEM_MC_COMMIT_EFLAGS(EFlags);
9568 IEM_MC_ADVANCE_RIP();
9569 IEM_MC_END();
9570 return VINF_SUCCESS;
9571
9572 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9573 }
9574 }
9575}
9576
9577
9578/** Opcode 0xc2. */
9579FNIEMOP_DEF(iemOp_retn_Iw)
9580{
9581 IEMOP_MNEMONIC("retn Iw");
9582 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9583 IEMOP_HLP_NO_LOCK_PREFIX();
9584 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9585 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, u16Imm);
9586}
9587
9588
9589/** Opcode 0xc3. */
9590FNIEMOP_DEF(iemOp_retn)
9591{
9592 IEMOP_MNEMONIC("retn");
9593 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9594 IEMOP_HLP_NO_LOCK_PREFIX();
9595 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, 0);
9596}
9597
9598
9599/** Opcode 0xc4. */
9600FNIEMOP_DEF(iemOp_les_Gv_Mp)
9601{
9602 IEMOP_MNEMONIC("les Gv,Mp");
9603 return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_ES);
9604}
9605
9606
9607/** Opcode 0xc5. */
9608FNIEMOP_DEF(iemOp_lds_Gv_Mp)
9609{
9610 IEMOP_MNEMONIC("lds Gv,Mp");
9611 return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_DS);
9612}
9613
9614
9615/** Opcode 0xc6. */
9616FNIEMOP_DEF(iemOp_Grp11_Eb_Ib)
9617{
9618 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9619 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9620 if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Eb,Ib in this group. */
9621        return IEMOP_RAISE_INVALID_OPCODE();
9622 IEMOP_MNEMONIC("mov Eb,Ib");
9623
9624 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9625 {
9626 /* register access */
9627 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9628 IEM_MC_BEGIN(0, 0);
9629 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Imm);
9630 IEM_MC_ADVANCE_RIP();
9631 IEM_MC_END();
9632 }
9633 else
9634 {
9635 /* memory access. */
9636 IEM_MC_BEGIN(0, 1);
9637 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9638 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9639 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9640 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Imm);
9641 IEM_MC_ADVANCE_RIP();
9642 IEM_MC_END();
9643 }
9644 return VINF_SUCCESS;
9645}
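/*
 * Note (informative decode example): with mod != 11b the immediate comes
 * after the effective address bytes, e.g. in 32-bit mode
 *
 *      c6 03 55        ; mov byte [ebx], 0x55
 *
 * which is why the memory path above calculates the address before fetching
 * u8Imm.
 */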
9646
9647
9648/** Opcode 0xc7. */
9649FNIEMOP_DEF(iemOp_Grp11_Ev_Iz)
9650{
9651 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9652 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9653    if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Ev,Iz in this group. */
9654        return IEMOP_RAISE_INVALID_OPCODE();
9655 IEMOP_MNEMONIC("mov Ev,Iz");
9656
9657 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9658 {
9659 /* register access */
9660 switch (pIemCpu->enmEffOpSize)
9661 {
9662 case IEMMODE_16BIT:
9663 IEM_MC_BEGIN(0, 0);
9664 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9665 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Imm);
9666 IEM_MC_ADVANCE_RIP();
9667 IEM_MC_END();
9668 return VINF_SUCCESS;
9669
9670 case IEMMODE_32BIT:
9671 IEM_MC_BEGIN(0, 0);
9672 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
9673 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Imm);
9674 IEM_MC_ADVANCE_RIP();
9675 IEM_MC_END();
9676 return VINF_SUCCESS;
9677
9678 case IEMMODE_64BIT:
9679 IEM_MC_BEGIN(0, 0);
9680                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); /* Iz: imm32 sign-extended, not a full imm64. */
9681 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Imm);
9682 IEM_MC_ADVANCE_RIP();
9683 IEM_MC_END();
9684 return VINF_SUCCESS;
9685
9686 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9687 }
9688 }
9689 else
9690 {
9691 /* memory access. */
9692 switch (pIemCpu->enmEffOpSize)
9693 {
9694 case IEMMODE_16BIT:
9695 IEM_MC_BEGIN(0, 1);
9696 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9697 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9698 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9699 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Imm);
9700 IEM_MC_ADVANCE_RIP();
9701 IEM_MC_END();
9702 return VINF_SUCCESS;
9703
9704 case IEMMODE_32BIT:
9705 IEM_MC_BEGIN(0, 1);
9706 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9707 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9708 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
9709 IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Imm);
9710 IEM_MC_ADVANCE_RIP();
9711 IEM_MC_END();
9712 return VINF_SUCCESS;
9713
9714 case IEMMODE_64BIT:
9715 IEM_MC_BEGIN(0, 1);
9716 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9717 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9718                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); /* Iz: imm32 sign-extended, not a full imm64. */
9719 IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Imm);
9720 IEM_MC_ADVANCE_RIP();
9721 IEM_MC_END();
9722 return VINF_SUCCESS;
9723
9724 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9725 }
9726 }
9727}
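/*
 * Note (informative): Iz follows the operand size but is capped at 32 bits,
 * so with REX.W the immediate is a sign-extended imm32 rather than an imm64:
 *
 *      48 c7 c0 ff ff ff ff    ; mov rax, 0xffffffffffffffff
 *
 * This is what the sign-extending 64-bit fetches above implement.
 */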
9728
9729
9730
9731
9732/** Opcode 0xc8. */
9733FNIEMOP_STUB(iemOp_enter_Iw_Ib);
9734
9735
9736/** Opcode 0xc9. */
9737FNIEMOP_DEF(iemOp_leave)
9738{
9739    IEMOP_MNEMONIC("leave");
9740 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9741 IEMOP_HLP_NO_LOCK_PREFIX();
9742 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_leave, pIemCpu->enmEffOpSize);
9743}
9744
9745
9746/** Opcode 0xca. */
9747FNIEMOP_DEF(iemOp_retf_Iw)
9748{
9749 IEMOP_MNEMONIC("retf Iw");
9750 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9751 IEMOP_HLP_NO_LOCK_PREFIX();
9752 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9753 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, u16Imm);
9754}
9755
9756
9757/** Opcode 0xcb. */
9758FNIEMOP_DEF(iemOp_retf)
9759{
9760 IEMOP_MNEMONIC("retf");
9761 IEMOP_HLP_NO_LOCK_PREFIX();
9762 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9763 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, 0);
9764}
9765
9766
9767/** Opcode 0xcc. */
9768FNIEMOP_DEF(iemOp_int_3)
9769{
    IEMOP_MNEMONIC("int3");
9770    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, X86_XCPT_BP, true /*fIsBpInstr*/);
9771}
9772
9773
9774/** Opcode 0xcd. */
9775FNIEMOP_DEF(iemOp_int_Ib)
9776{
    IEMOP_MNEMONIC("int Ib");
9777    uint8_t u8Int; IEM_OPCODE_GET_NEXT_U8(&u8Int);
9778 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, u8Int, false /*fIsBpInstr*/);
9779}
9780
9781
9782/** Opcode 0xce. */
9783FNIEMOP_DEF(iemOp_into)
9784{
    IEMOP_MNEMONIC("into");
    IEMOP_HLP_NO_64BIT(); /* into is invalid in 64-bit mode (#UD). */
9785    IEM_MC_BEGIN(2, 0);
9786 IEM_MC_ARG_CONST(uint8_t, u8Int, /*=*/ X86_XCPT_OF, 0);
9787 IEM_MC_ARG_CONST(bool, fIsBpInstr, /*=*/ false, 1);
9788 IEM_MC_CALL_CIMPL_2(iemCImpl_int, u8Int, fIsBpInstr);
9789 IEM_MC_END();
9790 return VINF_SUCCESS;
9791}
9792
9793
9794/** Opcode 0xcf. */
9795FNIEMOP_DEF(iemOp_iret)
9796{
9797 IEMOP_MNEMONIC("iret");
9798 IEMOP_HLP_NO_LOCK_PREFIX();
9799 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_iret, pIemCpu->enmEffOpSize);
9800}
9801
9802
9803/** Opcode 0xd0. */
9804FNIEMOP_DEF(iemOp_Grp2_Eb_1)
9805{
9806 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9807 PCIEMOPSHIFTSIZES pImpl;
9808 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9809 {
9810 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,1"); break;
9811 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,1"); break;
9812 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,1"); break;
9813 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,1"); break;
9814 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,1"); break;
9815 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,1"); break;
9816 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,1"); break;
9817        case 6: return IEMOP_RAISE_INVALID_OPCODE();
9818 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
9819 }
9820 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9821
9822 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9823 {
9824 /* register */
9825 IEMOP_HLP_NO_LOCK_PREFIX();
9826 IEM_MC_BEGIN(3, 0);
9827 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9828 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=*/1, 1);
9829 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9830 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9831 IEM_MC_REF_EFLAGS(pEFlags);
9832 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9833 IEM_MC_ADVANCE_RIP();
9834 IEM_MC_END();
9835 }
9836 else
9837 {
9838 /* memory */
9839 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9840 IEM_MC_BEGIN(3, 2);
9841 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9842 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=*/1, 1);
9843 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9844 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9845
9846 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9847 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9848 IEM_MC_FETCH_EFLAGS(EFlags);
9849 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9850
9851 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
9852 IEM_MC_COMMIT_EFLAGS(EFlags);
9853 IEM_MC_ADVANCE_RIP();
9854 IEM_MC_END();
9855 }
9856 return VINF_SUCCESS;
9857}
9858
9859
9860
9861/** Opcode 0xd1. */
9862FNIEMOP_DEF(iemOp_Grp2_Ev_1)
9863{
9864 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9865 PCIEMOPSHIFTSIZES pImpl;
9866 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9867 {
9868 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,1"); break;
9869 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,1"); break;
9870 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,1"); break;
9871 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,1"); break;
9872 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,1"); break;
9873 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,1"); break;
9874 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,1"); break;
9875        case 6: return IEMOP_RAISE_INVALID_OPCODE();
9876 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
9877 }
9878 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9879
9880 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9881 {
9882 /* register */
9883 IEMOP_HLP_NO_LOCK_PREFIX();
9884 switch (pIemCpu->enmEffOpSize)
9885 {
9886 case IEMMODE_16BIT:
9887 IEM_MC_BEGIN(3, 0);
9888 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9889 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9890 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9891 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9892 IEM_MC_REF_EFLAGS(pEFlags);
9893 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9894 IEM_MC_ADVANCE_RIP();
9895 IEM_MC_END();
9896 return VINF_SUCCESS;
9897
9898 case IEMMODE_32BIT:
9899 IEM_MC_BEGIN(3, 0);
9900 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9901 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9902 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9903 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9904 IEM_MC_REF_EFLAGS(pEFlags);
9905 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9906 IEM_MC_ADVANCE_RIP();
9907 IEM_MC_END();
9908 return VINF_SUCCESS;
9909
9910 case IEMMODE_64BIT:
9911 IEM_MC_BEGIN(3, 0);
9912 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9913 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9914 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9915 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9916 IEM_MC_REF_EFLAGS(pEFlags);
9917 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9918 IEM_MC_ADVANCE_RIP();
9919 IEM_MC_END();
9920 return VINF_SUCCESS;
9921
9922 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9923 }
9924 }
9925 else
9926 {
9927 /* memory */
9928 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9929 switch (pIemCpu->enmEffOpSize)
9930 {
9931 case IEMMODE_16BIT:
9932 IEM_MC_BEGIN(3, 2);
9933 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9934 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9935 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9936 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9937
9938 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9939 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9940 IEM_MC_FETCH_EFLAGS(EFlags);
9941 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9942
9943 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
9944 IEM_MC_COMMIT_EFLAGS(EFlags);
9945 IEM_MC_ADVANCE_RIP();
9946 IEM_MC_END();
9947 return VINF_SUCCESS;
9948
9949 case IEMMODE_32BIT:
9950 IEM_MC_BEGIN(3, 2);
9951 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9952 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9953 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9954 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9955
9956 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9957 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9958 IEM_MC_FETCH_EFLAGS(EFlags);
9959 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9960
9961 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
9962 IEM_MC_COMMIT_EFLAGS(EFlags);
9963 IEM_MC_ADVANCE_RIP();
9964 IEM_MC_END();
9965 return VINF_SUCCESS;
9966
9967 case IEMMODE_64BIT:
9968 IEM_MC_BEGIN(3, 2);
9969 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9970 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9971 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9972 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9973
9974 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9975 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9976 IEM_MC_FETCH_EFLAGS(EFlags);
9977 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9978
9979 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
9980 IEM_MC_COMMIT_EFLAGS(EFlags);
9981 IEM_MC_ADVANCE_RIP();
9982 IEM_MC_END();
9983 return VINF_SUCCESS;
9984
9985 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9986 }
9987 }
9988}
9989
9990
9991/** Opcode 0xd2. */
9992FNIEMOP_DEF(iemOp_Grp2_Eb_CL)
9993{
9994 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9995 PCIEMOPSHIFTSIZES pImpl;
9996 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9997 {
9998 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,CL"); break;
9999 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,CL"); break;
10000 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,CL"); break;
10001 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,CL"); break;
10002 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,CL"); break;
10003 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,CL"); break;
10004 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,CL"); break;
10005 case 6: return IEMOP_RAISE_INVALID_OPCODE();
10006 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc, grr. */
10007 }
10008 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
10009
10010 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10011 {
10012 /* register */
10013 IEMOP_HLP_NO_LOCK_PREFIX();
10014 IEM_MC_BEGIN(3, 0);
10015 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
10016 IEM_MC_ARG(uint8_t, cShiftArg, 1);
10017 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10018 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10019 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
10020 IEM_MC_REF_EFLAGS(pEFlags);
10021 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
10022 IEM_MC_ADVANCE_RIP();
10023 IEM_MC_END();
10024 }
10025 else
10026 {
10027 /* memory */
10028 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10029 IEM_MC_BEGIN(3, 2);
10030 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
10031 IEM_MC_ARG(uint8_t, cShiftArg, 1);
10032 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
10033 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10034
10035 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10036 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
10037 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10038 IEM_MC_FETCH_EFLAGS(EFlags);
10039 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
10040
10041 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
10042 IEM_MC_COMMIT_EFLAGS(EFlags);
10043 IEM_MC_ADVANCE_RIP();
10044 IEM_MC_END();
10045 }
10046 return VINF_SUCCESS;
10047}
10048
10049
10050/** Opcode 0xd3. */
10051FNIEMOP_DEF(iemOp_Grp2_Ev_CL)
10052{
10053 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10054 PCIEMOPSHIFTSIZES pImpl;
10055 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
10056 {
10057 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,CL"); break;
10058 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,CL"); break;
10059 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,CL"); break;
10060 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,CL"); break;
10061 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,CL"); break;
10062 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,CL"); break;
10063 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,CL"); break;
10064 case 6: return IEMOP_RAISE_INVALID_OPCODE();
10065 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
10066 }
10067 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
10068
10069 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10070 {
10071 /* register */
10072 IEMOP_HLP_NO_LOCK_PREFIX();
10073 switch (pIemCpu->enmEffOpSize)
10074 {
10075 case IEMMODE_16BIT:
10076 IEM_MC_BEGIN(3, 0);
10077 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
10078 IEM_MC_ARG(uint8_t, cShiftArg, 1);
10079 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10080 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10081 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
10082 IEM_MC_REF_EFLAGS(pEFlags);
10083 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
10084 IEM_MC_ADVANCE_RIP();
10085 IEM_MC_END();
10086 return VINF_SUCCESS;
10087
10088 case IEMMODE_32BIT:
10089 IEM_MC_BEGIN(3, 0);
10090 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
10091 IEM_MC_ARG(uint8_t, cShiftArg, 1);
10092 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10093 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10094 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
10095 IEM_MC_REF_EFLAGS(pEFlags);
10096 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
10097 IEM_MC_ADVANCE_RIP();
10098 IEM_MC_END();
10099 return VINF_SUCCESS;
10100
10101 case IEMMODE_64BIT:
10102 IEM_MC_BEGIN(3, 0);
10103 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
10104 IEM_MC_ARG(uint8_t, cShiftArg, 1);
10105 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10106 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10107 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
10108 IEM_MC_REF_EFLAGS(pEFlags);
10109 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
10110 IEM_MC_ADVANCE_RIP();
10111 IEM_MC_END();
10112 return VINF_SUCCESS;
10113
10114 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10115 }
10116 }
10117 else
10118 {
10119 /* memory */
10120 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10121 switch (pIemCpu->enmEffOpSize)
10122 {
10123 case IEMMODE_16BIT:
10124 IEM_MC_BEGIN(3, 2);
10125 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
10126 IEM_MC_ARG(uint8_t, cShiftArg, 1);
10127 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
10128 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10129
10130 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10131 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
10132 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10133 IEM_MC_FETCH_EFLAGS(EFlags);
10134 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
10135
10136 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
10137 IEM_MC_COMMIT_EFLAGS(EFlags);
10138 IEM_MC_ADVANCE_RIP();
10139 IEM_MC_END();
10140 return VINF_SUCCESS;
10141
10142 case IEMMODE_32BIT:
10143 IEM_MC_BEGIN(3, 2);
10144 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
10145 IEM_MC_ARG(uint8_t, cShiftArg, 1);
10146 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
10147 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10148
10149 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10150 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
10151 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10152 IEM_MC_FETCH_EFLAGS(EFlags);
10153 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
10154
10155 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
10156 IEM_MC_COMMIT_EFLAGS(EFlags);
10157 IEM_MC_ADVANCE_RIP();
10158 IEM_MC_END();
10159 return VINF_SUCCESS;
10160
10161 case IEMMODE_64BIT:
10162 IEM_MC_BEGIN(3, 2);
10163 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
10164 IEM_MC_ARG(uint8_t, cShiftArg, 1);
10165 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
10166 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10167
10168 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10169 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
10170 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10171 IEM_MC_FETCH_EFLAGS(EFlags);
10172 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
10173
10174 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
10175 IEM_MC_COMMIT_EFLAGS(EFlags);
10176 IEM_MC_ADVANCE_RIP();
10177 IEM_MC_END();
10178 return VINF_SUCCESS;
10179
10180 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10181 }
10182 }
10183}
10184
10185/** Opcode 0xd4. */
10186FNIEMOP_DEF(iemOp_aam_Ib)
10187{
10188 IEMOP_MNEMONIC("aam Ib");
10189 uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
10190 IEMOP_HLP_NO_LOCK_PREFIX();
10191 IEMOP_HLP_NO_64BIT();
10192 if (!bImm)
10193 return IEMOP_RAISE_DIVIDE_ERROR();
10194 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_aam, bImm);
10195}
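/*
 * Note (informative worked example): aam divides AL by the immediate,
 * quotient to AH, remainder to AL. With the usual base-10 operand:
 *
 *      AL = 0x3f (63), 'aam' (d4 0a)  =>  AH = 6, AL = 3
 *
 * A zero immediate raises #DE, which is what the check above handles.
 */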
10196
10197
10198/** Opcode 0xd5. */
10199FNIEMOP_DEF(iemOp_aad_Ib)
10200{
10201 IEMOP_MNEMONIC("aad Ib");
10202 uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
10203 IEMOP_HLP_NO_LOCK_PREFIX();
10204 IEMOP_HLP_NO_64BIT();
10205 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_aad, bImm);
10206}
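/*
 * Note (informative worked example): aad is the inverse of aam:
 * AL = AH * imm + AL and AH = 0. With AH = 3, AL = 7, 'aad' (d5 0a) yields
 * AL = 37 (0x25), AH = 0. Unlike aam, a zero immediate is harmless, so no
 * #DE check is needed here.
 */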
10207
10208
10209/** Opcode 0xd7. */
10210FNIEMOP_DEF(iemOp_xlat)
10211{
10212 IEMOP_MNEMONIC("xlat");
10213 IEMOP_HLP_NO_LOCK_PREFIX();
10214 switch (pIemCpu->enmEffAddrMode)
10215 {
10216 case IEMMODE_16BIT:
10217 IEM_MC_BEGIN(2, 0);
10218 IEM_MC_LOCAL(uint8_t, u8Tmp);
10219 IEM_MC_LOCAL(uint16_t, u16Addr);
10220 IEM_MC_FETCH_GREG_U8_ZX_U16(u16Addr, X86_GREG_xAX);
10221 IEM_MC_ADD_GREG_U16_TO_LOCAL(u16Addr, X86_GREG_xBX);
10222 IEM_MC_FETCH_MEM16_U8(u8Tmp, pIemCpu->iEffSeg, u16Addr);
10223 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
10224 IEM_MC_ADVANCE_RIP();
10225 IEM_MC_END();
10226 return VINF_SUCCESS;
10227
10228 case IEMMODE_32BIT:
10229 IEM_MC_BEGIN(2, 0);
10230 IEM_MC_LOCAL(uint8_t, u8Tmp);
10231 IEM_MC_LOCAL(uint32_t, u32Addr);
10232 IEM_MC_FETCH_GREG_U8_ZX_U32(u32Addr, X86_GREG_xAX);
10233 IEM_MC_ADD_GREG_U32_TO_LOCAL(u32Addr, X86_GREG_xBX);
10234 IEM_MC_FETCH_MEM32_U8(u8Tmp, pIemCpu->iEffSeg, u32Addr);
10235 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
10236 IEM_MC_ADVANCE_RIP();
10237 IEM_MC_END();
10238 return VINF_SUCCESS;
10239
10240 case IEMMODE_64BIT:
10241 IEM_MC_BEGIN(2, 0);
10242 IEM_MC_LOCAL(uint8_t, u8Tmp);
10243 IEM_MC_LOCAL(uint64_t, u64Addr);
10244 IEM_MC_FETCH_GREG_U8_ZX_U64(u64Addr, X86_GREG_xAX);
10245 IEM_MC_ADD_GREG_U64_TO_LOCAL(u64Addr, X86_GREG_xBX);
10246 IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, u64Addr);
10247 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
10248 IEM_MC_ADVANCE_RIP();
10249 IEM_MC_END();
10250 return VINF_SUCCESS;
10251
10252 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10253 }
10254}
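/*
 * Note (informative): xlat loads AL = [seg:(r/e)bx + zero-extended AL], the
 * classic use being a 256-entry translation table:
 *
 *      lea ebx, [table]
 *      mov al, 0x41
 *      xlatb           ; al = table[0x41]
 *
 * The three cases above differ only in how much of rBX enters the address.
 */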
10255
10256
10257/** Opcode 0xd8 11/0. */
10258FNIEMOP_STUB_1(iemOp_fadd_stN, uint8_t, bRm);
10259
10260/** Opcode 0xd8 11/1. */
10261FNIEMOP_STUB_1(iemOp_fmul_stN, uint8_t, bRm);
10262
10263/** Opcode 0xd8 11/2. */
10264FNIEMOP_STUB_1(iemOp_fcom_stN, uint8_t, bRm);
10265
10266/** Opcode 0xd8 11/3. */
10267FNIEMOP_STUB_1(iemOp_fcomp_stN, uint8_t, bRm);
10268
10269/** Opcode 0xd8 11/4. */
10270FNIEMOP_STUB_1(iemOp_fsub_stN, uint8_t, bRm);
10271
10272/** Opcode 0xd8 11/5. */
10273FNIEMOP_STUB_1(iemOp_fsubr_stN, uint8_t, bRm);
10274
10275/** Opcode 0xd8 11/6. */
10276FNIEMOP_STUB_1(iemOp_fdiv_stN, uint8_t, bRm);
10277
10278/** Opcode 0xd8 11/7. */
10279FNIEMOP_STUB_1(iemOp_fdivr_stN, uint8_t, bRm);
10280
10281/** Opcode 0xd8 !11/0. */
10282FNIEMOP_STUB_1(iemOp_fadd_m32r, uint8_t, bRm);
10283
10284/** Opcode 0xd8 !11/1. */
10285FNIEMOP_STUB_1(iemOp_fmul_m32r, uint8_t, bRm);
10286
10287/** Opcode 0xd8 !11/2. */
10288FNIEMOP_STUB_1(iemOp_fcom_m32r, uint8_t, bRm);
10289
10290/** Opcode 0xd8 !11/3. */
10291FNIEMOP_STUB_1(iemOp_fcomp_m32r, uint8_t, bRm);
10292
10293/** Opcode 0xd8 !11/4. */
10294FNIEMOP_STUB_1(iemOp_fsub_m32r, uint8_t, bRm);
10295
10296/** Opcode 0xd8 !11/5. */
10297FNIEMOP_STUB_1(iemOp_fsubr_m32r, uint8_t, bRm);
10298
10299/** Opcode 0xd8 !11/6. */
10300FNIEMOP_STUB_1(iemOp_fdiv_m32r, uint8_t, bRm);
10301
10302/** Opcode 0xd8 !11/7. */
10303FNIEMOP_STUB_1(iemOp_fdivr_m32r, uint8_t, bRm);
10304
10305/** Opcode 0xd8. */
10306FNIEMOP_DEF(iemOp_EscF0)
10307{
10308 pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
10309 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10310
10311 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10312 {
10313 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
10314 {
10315 case 0: return FNIEMOP_CALL_1(iemOp_fadd_stN, bRm);
10316 case 1: return FNIEMOP_CALL_1(iemOp_fmul_stN, bRm);
10317 case 2: return FNIEMOP_CALL_1(iemOp_fcom_stN, bRm);
10318 case 3: return FNIEMOP_CALL_1(iemOp_fcomp_stN, bRm);
10319 case 4: return FNIEMOP_CALL_1(iemOp_fsub_stN, bRm);
10320 case 5: return FNIEMOP_CALL_1(iemOp_fsubr_stN, bRm);
10321 case 6: return FNIEMOP_CALL_1(iemOp_fdiv_stN, bRm);
10322 case 7: return FNIEMOP_CALL_1(iemOp_fdivr_stN, bRm);
10323 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10324 }
10325 }
10326 else
10327 {
10328 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
10329 {
10330 case 0: return FNIEMOP_CALL_1(iemOp_fadd_m32r, bRm);
10331 case 1: return FNIEMOP_CALL_1(iemOp_fmul_m32r, bRm);
10332 case 2: return FNIEMOP_CALL_1(iemOp_fcom_m32r, bRm);
10333 case 3: return FNIEMOP_CALL_1(iemOp_fcomp_m32r, bRm);
10334 case 4: return FNIEMOP_CALL_1(iemOp_fsub_m32r, bRm);
10335 case 5: return FNIEMOP_CALL_1(iemOp_fsubr_m32r, bRm);
10336 case 6: return FNIEMOP_CALL_1(iemOp_fdiv_m32r, bRm);
10337 case 7: return FNIEMOP_CALL_1(iemOp_fdivr_m32r, bRm);
10338 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10339 }
10340 }
10341}
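/*
 * Note (informative decode example): for the FPU escape opcodes the ModR/M
 * byte picks between register and memory forms, e.g.
 *
 *      d8 c1           ; fadd st(0), st(1)   (mod=11b, reg=000b, rm=001b)
 *      d8 03           ; fadd dword [ebx]    (mod=00b, reg=000b, m32r form)
 *
 * which is the split implemented by the mod test above.
 */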
10342
10343
10344/** Opcode 0xd9 /0 mem32real */
10345FNIEMOP_DEF_1(iemOp_fld_m32r, uint8_t, bRm)
10346{
10347 IEMOP_MNEMONIC("fld m32r");
10348 IEMOP_HLP_NO_LOCK_PREFIX();
10349
10350 IEM_MC_BEGIN(2, 2);
10351 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
10352 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
10353 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
10354 IEM_MC_ARG(RTFLOAT32U, r32Val, 1);
10355
10356 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
10357 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
10358 IEM_MC_MAYBE_RAISE_FPU_XCPT();
10359 IEM_MC_FETCH_MEM_R32(r32Val, pIemCpu->iEffSeg, GCPtrEffSrc);
10360 IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fpu_r32_to_r80, pFpuRes, r32Val);
10361
10362 IEM_MC_PUSH_FPU_RESULT(FpuRes);
10363 IEM_MC_ADVANCE_RIP();
10364
10365 IEM_MC_END();
10366 return VINF_SUCCESS;
10367}
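/*
 * Note (informative): iemAImpl_fpu_r32_to_r80 widens the single precision
 * value to the 80-bit register format, roughly:
 *
 *      sign     : copied as-is
 *      exponent : rebiased from 127 to 16383
 *      mantissa : 23 bits shifted up to 63, integer bit made explicit
 *
 * A load therefore never rounds; only storing back to m32r can.
 */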
10368
10369
10370/** Opcode 0xd9 /0 stN */
10371FNIEMOP_STUB_1(iemOp_fld_stN, uint8_t, bRm);
10372
10373/** Opcode 0xd9 /2 mem32real */
10374FNIEMOP_STUB_1(iemOp_fst_m32r, uint8_t, bRm);
10375
10376/** Opcode 0xd9 /1 stN */
10377FNIEMOP_STUB_1(iemOp_fxch_stN, uint8_t, bRm);
10378
10379/** Opcode 0xd9 /3 */
10380FNIEMOP_STUB_1(iemOp_fstp_m32r, uint8_t, bRm);
10381
10382/** Opcode 0xd9 /4 */
10383FNIEMOP_STUB_1(iemOp_fldenv, uint8_t, bRm);
10384
10385/** Opcode 0xd9 /5 */
10386FNIEMOP_STUB_1(iemOp_fldcw, uint8_t, bRm);
10387
10388/** Opcode 0xd9 /6 */
10389FNIEMOP_STUB_1(iemOp_fstenv, uint8_t, bRm);
10390
10391/** Opcode 0xd9 /7 */
10392FNIEMOP_STUB_1(iemOp_fstcw, uint8_t, bRm);
10393
10394/** Opcode 0xd9 0xd0, 0xd9 0xd8-0xdf. */
10395FNIEMOP_STUB(iemOp_fnop);
10396
10397/** Opcode 0xd9 0xe0. */
10398FNIEMOP_STUB(iemOp_fchs);
10399
10400/** Opcode 0xd9 0xe1. */
10401FNIEMOP_STUB(iemOp_fabs);
10402
10403/** Opcode 0xd9 0xe4. */
10404FNIEMOP_STUB(iemOp_ftst);
10405
10406/** Opcode 0xd9 0xe5. */
10407FNIEMOP_STUB(iemOp_fxam);
10408
10409/** Opcode 0xd9 0xe8. */
10410FNIEMOP_STUB(iemOp_fld1);
10411
10412/** Opcode 0xd9 0xe9. */
10413FNIEMOP_STUB(iemOp_fldl2t);
10414
10415/** Opcode 0xd9 0xea. */
10416FNIEMOP_STUB(iemOp_fldl2e);
10417
10418/** Opcode 0xd9 0xeb. */
10419FNIEMOP_STUB(iemOp_fldpi);
10420
10421/** Opcode 0xd9 0xec. */
10422FNIEMOP_STUB(iemOp_fldlg2);
10423
10424/** Opcode 0xd9 0xed. */
10425FNIEMOP_STUB(iemOp_fldln2);
10426
10427/** Opcode 0xd9 0xee. */
10428FNIEMOP_STUB(iemOp_fldz);
10429
10430/** Opcode 0xd9 0xf0. */
10431FNIEMOP_STUB(iemOp_f2xm1);
10432
10433/** Opcode 0xd9 0xf1. */
10434FNIEMOP_STUB(iemOp_fyl2x);
10435
10436/** Opcode 0xd9 0xf2. */
10437FNIEMOP_STUB(iemOp_fptan);
10438
10439/** Opcode 0xd9 0xf3. */
10440FNIEMOP_STUB(iemOp_fpatan);
10441
10442/** Opcode 0xd9 0xf4. */
10443FNIEMOP_STUB(iemOp_fxtract);
10444
10445/** Opcode 0xd9 0xf5. */
10446FNIEMOP_STUB(iemOp_fprem1);
10447
10448/** Opcode 0xd9 0xf6. */
10449FNIEMOP_STUB(iemOp_fdecstp);
10450
10451/** Opcode 0xd9 0xf7. */
10452FNIEMOP_STUB(iemOp_fincstp);
10453
10454/** Opcode 0xd9 0xf8. */
10455FNIEMOP_STUB(iemOp_fprem);
10456
10457/** Opcode 0xd9 0xf9. */
10458FNIEMOP_STUB(iemOp_fyl2xp1);
10459
10460/** Opcode 0xd9 0xfa. */
10461FNIEMOP_STUB(iemOp_fsqrt);
10462
10463/** Opcode 0xd9 0xfb. */
10464FNIEMOP_STUB(iemOp_fsincos);
10465
10466/** Opcode 0xd9 0xfc. */
10467FNIEMOP_STUB(iemOp_frndint);
10468
10469/** Opcode 0xd9 0xfd. */
10470FNIEMOP_STUB(iemOp_fscale);
10471
10472/** Opcode 0xd9 0xfe. */
10473FNIEMOP_STUB(iemOp_fsin);
10474
10475/** Opcode 0xd9 0xff. */
10476FNIEMOP_STUB(iemOp_fcos);
10477
10478
10479/** Used by iemOp_EscF1. */
10480static const PFNIEMOP g_apfnEscF1_E0toFF[32] =
10481{
10482 /* 0xe0 */ iemOp_fchs,
10483 /* 0xe1 */ iemOp_fabs,
10484 /* 0xe2 */ iemOp_Invalid,
10485 /* 0xe3 */ iemOp_Invalid,
10486 /* 0xe4 */ iemOp_ftst,
10487 /* 0xe5 */ iemOp_fxam,
10488 /* 0xe6 */ iemOp_Invalid,
10489 /* 0xe7 */ iemOp_Invalid,
10490 /* 0xe8 */ iemOp_fld1,
10491 /* 0xe9 */ iemOp_fldl2t,
10492 /* 0xea */ iemOp_fldl2e,
10493 /* 0xeb */ iemOp_fldpi,
10494 /* 0xec */ iemOp_fldlg2,
10495 /* 0xed */ iemOp_fldln2,
10496 /* 0xee */ iemOp_fldz,
10497 /* 0xef */ iemOp_Invalid,
10498 /* 0xf0 */ iemOp_f2xm1,
10499    /* 0xf1 */ iemOp_fyl2x,
10500 /* 0xf2 */ iemOp_fptan,
10501 /* 0xf3 */ iemOp_fpatan,
10502 /* 0xf4 */ iemOp_fxtract,
10503 /* 0xf5 */ iemOp_fprem1,
10504 /* 0xf6 */ iemOp_fdecstp,
10505 /* 0xf7 */ iemOp_fincstp,
10506 /* 0xf8 */ iemOp_fprem,
10507 /* 0xf9 */ iemOp_fyl2xp1,
10508 /* 0xfa */ iemOp_fsqrt,
10509 /* 0xfb */ iemOp_fsincos,
10510 /* 0xfc */ iemOp_frndint,
10511 /* 0xfd */ iemOp_fscale,
10512 /* 0xfe */ iemOp_fsin,
10513 /* 0xff */ iemOp_fcos
10514};
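/*
 * Note (informative): the table is indexed with bRm - 0xe0, so e.g.
 * bRm = 0xfa lands on entry 0x1a, iemOp_fsqrt, while encodings Intel leaves
 * undefined point at iemOp_Invalid.
 */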
10515
10516
10517/** Opcode 0xd9. */
10518FNIEMOP_DEF(iemOp_EscF1)
10519{
10520 pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
10521 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10522 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10523 {
10524 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
10525 {
10526 case 0: return FNIEMOP_CALL_1(iemOp_fld_stN, bRm);
10527 case 1: return FNIEMOP_CALL_1(iemOp_fxch_stN, bRm);
10528 case 2:
10529            if (bRm == 0xd0) /* fnop is d9 d0; d9 c9 (fxch st1) is taken by case 1 above. */
10530 return FNIEMOP_CALL(iemOp_fnop);
10531 return IEMOP_RAISE_INVALID_OPCODE();
10532 case 3:
10533                return FNIEMOP_CALL(iemOp_fnop); /* AMD says reserved; tests on Intel indicate FNOP. */
10534 case 4:
10535 case 5:
10536 case 6:
10537 case 7:
10538                return FNIEMOP_CALL(g_apfnEscF1_E0toFF[bRm - 0xe0]); /* bRm is 0xe0..0xff here, giving indexes 0..31. */
10539 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10540 }
10541 }
10542 else
10543 {
10544 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
10545 {
10546 case 0: return FNIEMOP_CALL_1(iemOp_fld_m32r, bRm);
10547 case 1: return IEMOP_RAISE_INVALID_OPCODE();
10548 case 2: return FNIEMOP_CALL_1(iemOp_fst_m32r, bRm);
10549 case 3: return FNIEMOP_CALL_1(iemOp_fstp_m32r, bRm);
10550 case 4: return FNIEMOP_CALL_1(iemOp_fldenv, bRm);
10551 case 5: return FNIEMOP_CALL_1(iemOp_fldcw, bRm);
10552 case 6: return FNIEMOP_CALL_1(iemOp_fstenv, bRm);
10553 case 7: return FNIEMOP_CALL_1(iemOp_fstcw, bRm);
10554 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10555 }
10556 }
10557}
10558
10559
10560/** Opcode 0xda 11/0. */
10561FNIEMOP_STUB_1(iemOp_fcmovb_stN, uint8_t, bRm);
10562/** Opcode 0xda 11/1. */
10563FNIEMOP_STUB_1(iemOp_fcmove_stN, uint8_t, bRm);
10564/** Opcode 0xda 11/2. */
10565FNIEMOP_STUB_1(iemOp_fcmovbe_stN, uint8_t, bRm);
10566/** Opcode 0xda 11/3. */
10567FNIEMOP_STUB_1(iemOp_fcmovu_stN, uint8_t, bRm);
10568/** Opcode 0xda 0xe9. */
10569FNIEMOP_STUB(iemOp_fucompp);
10570/** Opcode 0xda !11/0. */
10571FNIEMOP_STUB_1(iemOp_fiadd_m32i, uint8_t, bRm);
10572/** Opcode 0xda !11/1. */
10573FNIEMOP_STUB_1(iemOp_fimul_m32i, uint8_t, bRm);
10574/** Opcode 0xda !11/2. */
10575FNIEMOP_STUB_1(iemOp_ficom_m32i, uint8_t, bRm);
10576/** Opcode 0xda !11/3. */
10577FNIEMOP_STUB_1(iemOp_ficomp_m32i, uint8_t, bRm);
10578/** Opcode 0xda !11/4. */
10579FNIEMOP_STUB_1(iemOp_fisub_m32i, uint8_t, bRm);
10580/** Opcode 0xda !11/5. */
10581FNIEMOP_STUB_1(iemOp_fisubr_m32i, uint8_t, bRm);
10582/** Opcode 0xda !11/6. */
10583FNIEMOP_STUB_1(iemOp_fidiv_m32i, uint8_t, bRm);
10584/** Opcode 0xda !11/7. */
10585FNIEMOP_STUB_1(iemOp_fidivr_m32i, uint8_t, bRm);
10586
10587/** Opcode 0xda. */
10588FNIEMOP_DEF(iemOp_EscF2)
10589{
10590 pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
10591 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10592 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10593 {
10594 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
10595 {
10596 case 0: return FNIEMOP_CALL_1(iemOp_fcmovb_stN, bRm);
10597 case 1: return FNIEMOP_CALL_1(iemOp_fcmove_stN, bRm);
10598 case 2: return FNIEMOP_CALL_1(iemOp_fcmovbe_stN, bRm);
10599 case 3: return FNIEMOP_CALL_1(iemOp_fcmovu_stN, bRm);
10600 case 4: return IEMOP_RAISE_INVALID_OPCODE();
10601 case 5:
10602 if (bRm == 0xe9)
10603 return FNIEMOP_CALL(iemOp_fucompp);
10604 return IEMOP_RAISE_INVALID_OPCODE();
10605 case 6: return IEMOP_RAISE_INVALID_OPCODE();
10606 case 7: return IEMOP_RAISE_INVALID_OPCODE();
10607 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10608 }
10609 }
10610 else
10611 {
10612 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
10613 {
10614 case 0: return FNIEMOP_CALL_1(iemOp_fiadd_m32i, bRm);
10615 case 1: return FNIEMOP_CALL_1(iemOp_fimul_m32i, bRm);
10616 case 2: return FNIEMOP_CALL_1(iemOp_ficom_m32i, bRm);
10617 case 3: return FNIEMOP_CALL_1(iemOp_ficomp_m32i, bRm);
10618 case 4: return FNIEMOP_CALL_1(iemOp_fisub_m32i, bRm);
10619 case 5: return FNIEMOP_CALL_1(iemOp_fisubr_m32i, bRm);
10620 case 6: return FNIEMOP_CALL_1(iemOp_fidiv_m32i, bRm);
10621 case 7: return FNIEMOP_CALL_1(iemOp_fidivr_m32i, bRm);
10622 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10623 }
10624 }
10625}
10626
10627
10628/** Opcode 0xdb !11/0. */
10629FNIEMOP_STUB_1(iemOp_fild_m32i, uint8_t, bRm);
10630
10631/** Opcode 0xdb !11/1. */
10632FNIEMOP_STUB_1(iemOp_fisttp_m32i, uint8_t, bRm);
10633
10634/** Opcode 0xdb !11/2. */
10635FNIEMOP_STUB_1(iemOp_fist_m32i, uint8_t, bRm);
10636
10637/** Opcode 0xdb !11/3. */
10638FNIEMOP_STUB_1(iemOp_fistp_m32i, uint8_t, bRm);
10639
10640/** Opcode 0xdb !11/5. */
10641FNIEMOP_STUB_1(iemOp_fld_r80, uint8_t, bRm);
10642
10643/** Opcode 0xdb !11/7. */
10644FNIEMOP_STUB_1(iemOp_fstp_r80, uint8_t, bRm);
10645
10646/** Opcode 0xdb 11/0. */
10647FNIEMOP_STUB_1(iemOp_fcmovnb, uint8_t, bRm);
10648
10649/** Opcode 0xdb 11/1. */
10650FNIEMOP_STUB_1(iemOp_fcmovne, uint8_t, bRm);
10651
10652/** Opcode 0xdb 11/2. */
10653FNIEMOP_STUB_1(iemOp_fcmovnbe, uint8_t, bRm);
10654
10655/** Opcode 0xdb 11/3. */
10656FNIEMOP_STUB_1(iemOp_fcmovnu, uint8_t, bRm);
10657
10658
10659/** Opcode 0xdb 0xe0. */
10660FNIEMOP_DEF(iemOp_fneni)
10661{
10662 IEMOP_MNEMONIC("fneni (8087/ign)");
10663 IEM_MC_BEGIN(0,0);
10664 IEM_MC_ADVANCE_RIP();
10665 IEM_MC_END();
10666 return VINF_SUCCESS;
10667}
10668
10669
10670/** Opcode 0xdb 0xe1. */
10671FNIEMOP_DEF(iemOp_fndisi)
10672{
10673 IEMOP_MNEMONIC("fndisi (8087/ign)");
10674 IEM_MC_BEGIN(0,0);
10675 IEM_MC_ADVANCE_RIP();
10676 IEM_MC_END();
10677 return VINF_SUCCESS;
10678}
10679
10680
10681/** Opcode 0xdb 0xe2. */
10682FNIEMOP_STUB(iemOp_fnclex);
10683
10684
10685/** Opcode 0xdb 0xe3. */
10686FNIEMOP_DEF(iemOp_fninit)
10687{
10688 IEMOP_MNEMONIC("fninit");
10689 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_finit, false /*fCheckXcpts*/);
10690}
10691
10692
10693/** Opcode 0xdb 0xe4. */
10694FNIEMOP_DEF(iemOp_fnsetpm)
10695{
10696 IEMOP_MNEMONIC("fnsetpm (80287/ign)"); /* set protected mode on fpu. */
10697 IEM_MC_BEGIN(0,0);
10698 IEM_MC_ADVANCE_RIP();
10699 IEM_MC_END();
10700 return VINF_SUCCESS;
10701}
10702
10703
10704/** Opcode 0xdb 0xe5. */
10705FNIEMOP_DEF(iemOp_frstpm)
10706{
10707 IEMOP_MNEMONIC("frstpm (80287XL/ign)"); /* reset pm, back to real mode. */
10708#if 0 /* #UDs on newer CPUs */
10709 IEM_MC_BEGIN(0,0);
10710 IEM_MC_ADVANCE_RIP();
10711 IEM_MC_END();
10712 return VINF_SUCCESS;
10713#else
10714 return IEMOP_RAISE_INVALID_OPCODE();
10715#endif
10716}
10717
10718
10719/** Opcode 0xdb 11/5. */
10720FNIEMOP_STUB_1(iemOp_fucomi, uint8_t, bRm);
10721
10722/** Opcode 0xdb 11/6. */
10723FNIEMOP_STUB_1(iemOp_fcomi, uint8_t, bRm);
10724
10725
10726/** Opcode 0xdb. */
10727FNIEMOP_DEF(iemOp_EscF3)
10728{
10729 pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
10730 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10731 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10732 {
10733 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
10734 {
10735            case 0: return FNIEMOP_CALL_1(iemOp_fcmovnb,  bRm);
10736            case 1: return FNIEMOP_CALL_1(iemOp_fcmovne,  bRm);
10737            case 2: return FNIEMOP_CALL_1(iemOp_fcmovnbe, bRm);
10738            case 3: return FNIEMOP_CALL_1(iemOp_fcmovnu,  bRm);
10739 case 4:
10740 IEMOP_HLP_NO_LOCK_PREFIX();
10741 switch (bRm)
10742 {
10743 case 0xe0: return FNIEMOP_CALL(iemOp_fneni);
10744 case 0xe1: return FNIEMOP_CALL(iemOp_fndisi);
10745 case 0xe2: return FNIEMOP_CALL(iemOp_fnclex);
10746 case 0xe3: return FNIEMOP_CALL(iemOp_fninit);
10747 case 0xe4: return FNIEMOP_CALL(iemOp_fnsetpm);
10748 case 0xe5: return FNIEMOP_CALL(iemOp_frstpm);
10749 case 0xe6: return IEMOP_RAISE_INVALID_OPCODE();
10750 case 0xe7: return IEMOP_RAISE_INVALID_OPCODE();
10751 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10752 }
10753 break;
10754 case 5: return FNIEMOP_CALL_1(iemOp_fucomi, bRm);
10755 case 6: return FNIEMOP_CALL_1(iemOp_fcomi, bRm);
10756 case 7: return IEMOP_RAISE_INVALID_OPCODE();
10757 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10758 }
10759 }
10760 else
10761 {
10762 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
10763 {
10764 case 0: return FNIEMOP_CALL_1(iemOp_fild_m32i, bRm);
10765            case 1: return FNIEMOP_CALL_1(iemOp_fisttp_m32i, bRm);
10766 case 2: return FNIEMOP_CALL_1(iemOp_fist_m32i, bRm);
10767 case 3: return FNIEMOP_CALL_1(iemOp_fistp_m32i, bRm);
10768 case 4: return IEMOP_RAISE_INVALID_OPCODE();
10769 case 5: return FNIEMOP_CALL_1(iemOp_fld_r80, bRm);
10770 case 6: return IEMOP_RAISE_INVALID_OPCODE();
10771 case 7: return FNIEMOP_CALL_1(iemOp_fstp_r80, bRm);
10772 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10773 }
10774 }
10775}
10776
10777/** Opcode 0xdc. */
10778FNIEMOP_STUB(iemOp_EscF4);
10779
10780
10781/** Opcode 0xdd !11/0. */
10782FNIEMOP_STUB_1(iemOp_fld_m64r, uint8_t, bRm);
10783
10784/** Opcode 0xdd !11/1. */
10785FNIEMOP_STUB_1(iemOp_fisttp_m64i, uint8_t, bRm);
10786
10787/** Opcode 0xdd !11/2. */
10788FNIEMOP_STUB_1(iemOp_fst_m64r, uint8_t, bRm);
10789
10790/** Opcode 0xdd !11/3. */
10791FNIEMOP_STUB_1(iemOp_fstp_m64r, uint8_t, bRm);
10792
10793/** Opcode 0xdd !11/4. */
10794FNIEMOP_STUB_1(iemOp_frstor, uint8_t, bRm);
10795
10796/** Opcode 0xdd !11/6. */
10797FNIEMOP_STUB_1(iemOp_fnsave, uint8_t, bRm);
10798
10799/** Opcode 0xdd !11/7. */
10800FNIEMOP_STUB_1(iemOp_fnstsw, uint8_t, bRm);
10801
10802/** Opcode 0xdd 11/0. */
10803FNIEMOP_STUB_1(iemOp_ffree_stN, uint8_t, bRm);
10804
10805/** Opcode 0xdd 11/1. */
10806FNIEMOP_STUB_1(iemOp_fst_stN, uint8_t, bRm);
10807
10808/** Opcode 0xdd 11/2. */
10809FNIEMOP_STUB_1(iemOp_fstp_stN, uint8_t, bRm);
10810
10811/** Opcode 0xdd 11/3. */
10812FNIEMOP_STUB_1(iemOp_fucom_stN, uint8_t, bRm);
10813
10814/** Opcode 0xdd 11/4. */
10815FNIEMOP_STUB_1(iemOp_fucomp_stN, uint8_t, bRm);
10816
10817/** Opcode 0xdd. */
10818FNIEMOP_DEF(iemOp_EscF5)
10819{
10820 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10821 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10822 {
10823 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
10824 {
10825 case 0: return FNIEMOP_CALL_1(iemOp_ffree_stN, bRm);
10826 case 1: return FNIEMOP_CALL( iemOp_fnop);
10827 case 2: return FNIEMOP_CALL_1(iemOp_fst_stN, bRm);
10828 case 3: return FNIEMOP_CALL_1(iemOp_fstp_stN, bRm);
10829 case 4: return FNIEMOP_CALL_1(iemOp_fucom_stN, bRm);
10830 case 5: return FNIEMOP_CALL_1(iemOp_fucomp_stN, bRm);
10831 case 6: return IEMOP_RAISE_INVALID_OPCODE();
10832 case 7: return IEMOP_RAISE_INVALID_OPCODE();
10833 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10834 }
10835 }
10836 else
10837 {
10838 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
10839 {
10840 case 0: return FNIEMOP_CALL_1(iemOp_fld_m64r, bRm);
10841 case 1: return FNIEMOP_CALL_1(iemOp_fisttp_m64i, bRm);
10842 case 2: return FNIEMOP_CALL_1(iemOp_fst_m64r, bRm);
10843 case 3: return FNIEMOP_CALL_1(iemOp_fstp_m64r, bRm);
10844 case 4: return FNIEMOP_CALL_1(iemOp_frstor, bRm);
10845 case 5: return IEMOP_RAISE_INVALID_OPCODE();
10846 case 6: return FNIEMOP_CALL_1(iemOp_fnsave, bRm);
10847 case 7: return FNIEMOP_CALL_1(iemOp_fnstsw, bRm);
10848 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10849 }
10850 }
10851}
10852
10853
10854/** Opcode 0xde 0xd9. */
10855FNIEMOP_STUB(iemOp_fcompp);
10856
10857/** Opcode 0xde. */
10858FNIEMOP_DEF(iemOp_EscF6)
10859{
10860 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10861 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10862 {
10863 switch (bRm & 0xf8)
10864 {
10865            case 0xc0: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // faddp
10866            case 0xc8: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fmulp
10867 case 0xd0: return IEMOP_RAISE_INVALID_OPCODE();
10868 case 0xd8:
10869 switch (bRm)
10870 {
10871 case 0xd9: return FNIEMOP_CALL(iemOp_fcompp);
10872 default: return IEMOP_RAISE_INVALID_OPCODE();
10873 }
10874 case 0xe0: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fsubrp
10875 case 0xe8: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fsubp
10876 case 0xf0: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fdivrp
10877 case 0xf8: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fdivp
10878 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10879 }
10880 }
10881 else
10882 {
10883#if 0
10884 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
10885 {
10886 case 0: return FNIEMOP_CALL_1(iemOp_fiadd_w, bRm);
10887 case 1: return FNIEMOP_CALL_1(iemOp_fimul_w, bRm);
10888 case 2: return FNIEMOP_CALL_1(iemOp_ficom_w, bRm);
10889 case 3: return FNIEMOP_CALL_1(iemOp_ficomp_w, bRm);
10890 case 4: return FNIEMOP_CALL_1(iemOp_fisub_w, bRm);
10891 case 5: return FNIEMOP_CALL_1(iemOp_fisubr_w, bRm);
10892 case 6: return FNIEMOP_CALL_1(iemOp_fidiv_w, bRm);
10893 case 7: return FNIEMOP_CALL_1(iemOp_fidivr_w, bRm);
10894 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10895 }
10896#endif
10897 AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
10898 }
10899}
10900
10901
10902/** Opcode 0xdf 0xe0. */
10903FNIEMOP_DEF(iemOp_fnstsw_ax)
10904{
10905 IEMOP_MNEMONIC("fnstsw ax");
10906 IEMOP_HLP_NO_LOCK_PREFIX();
10907
10908 IEM_MC_BEGIN(0, 1);
10909 IEM_MC_LOCAL(uint16_t, u16Tmp);
10910 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
10911 IEM_MC_FETCH_FSW(u16Tmp);
10912 IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
10913 IEM_MC_ADVANCE_RIP();
10914 IEM_MC_END();
10915 return VINF_SUCCESS;
10916}
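
/*
 * Illustrative sketch (not part of IEM, compiled out): once the microcode
 * above is expanded, FNSTSW AX amounts to copying the 16-bit FPU status word
 * into AX.  pFsw and pAx are stand-ins for the emulated register state.
 */
#if 0
static void iemExampleFnstswAx(uint16_t const *pFsw, uint16_t *pAx)
{
    *pAx = *pFsw; /* no EFLAGS are modified; only the CR0.EM/TS gating applies */
}
#endif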
10917
10918
10919/** Opcode 0xdf. */
10920FNIEMOP_DEF(iemOp_EscF7)
10921{
10922 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10923 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10924 {
10925 switch (bRm & 0xf8)
10926 {
10927 case 0xc0: return IEMOP_RAISE_INVALID_OPCODE();
10928 case 0xc8: return IEMOP_RAISE_INVALID_OPCODE();
10929 case 0xd0: return IEMOP_RAISE_INVALID_OPCODE();
10930 case 0xd8: return IEMOP_RAISE_INVALID_OPCODE();
10931 case 0xe0:
10932 switch (bRm)
10933 {
10934 case 0xe0: return FNIEMOP_CALL(iemOp_fnstsw_ax);
10935 default: return IEMOP_RAISE_INVALID_OPCODE();
10936 }
10937 case 0xe8: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fucomip
10938 case 0xf0: AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED); // fcomip
10939 case 0xf8: return IEMOP_RAISE_INVALID_OPCODE();
10940 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10941 }
10942 }
10943 else
10944 {
10945 AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
10946 }
10947}
10948
10949
10950/** Opcode 0xe0. */
10951FNIEMOP_DEF(iemOp_loopne_Jb)
10952{
10953 IEMOP_MNEMONIC("loopne Jb");
10954 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
10955 IEMOP_HLP_NO_LOCK_PREFIX();
10956 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10957
10958 switch (pIemCpu->enmEffAddrMode)
10959 {
10960 case IEMMODE_16BIT:
10961 IEM_MC_BEGIN(0,0);
10962 IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
10963 IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
10964 IEM_MC_REL_JMP_S8(i8Imm);
10965 } IEM_MC_ELSE() {
10966 IEM_MC_ADVANCE_RIP();
10967 } IEM_MC_ENDIF();
10968 IEM_MC_END();
10969 return VINF_SUCCESS;
10970
10971 case IEMMODE_32BIT:
10972 IEM_MC_BEGIN(0,0);
10973 IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
10974 IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
10975 IEM_MC_REL_JMP_S8(i8Imm);
10976 } IEM_MC_ELSE() {
10977 IEM_MC_ADVANCE_RIP();
10978 } IEM_MC_ENDIF();
10979 IEM_MC_END();
10980 return VINF_SUCCESS;
10981
10982 case IEMMODE_64BIT:
10983 IEM_MC_BEGIN(0,0);
10984 IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
10985 IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
10986 IEM_MC_REL_JMP_S8(i8Imm);
10987 } IEM_MC_ELSE() {
10988 IEM_MC_ADVANCE_RIP();
10989 } IEM_MC_ENDIF();
10990 IEM_MC_END();
10991 return VINF_SUCCESS;
10992
10993 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10994 }
10995}
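
/*
 * Illustrative sketch (not part of IEM, compiled out): the 16-bit LOOPNE leg
 * above in plain C.  puCx and uIpNext stand in for the emulated CX and the
 * address of the next instruction; ZF sits at bit 6 of EFLAGS.
 */
#if 0
static uint16_t iemExampleLoopNe16(uint16_t *puCx, uint16_t uIpNext, uint32_t fEfl, int8_t i8Disp)
{
    *puCx -= 1;                             /* always decrements; flags are untouched */
    int const fZf = (fEfl >> 6) & 1;
    return *puCx != 0 && !fZf ? (uint16_t)(uIpNext + i8Disp) : uIpNext;
}
#endif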
10996
10997
10998/** Opcode 0xe1. */
10999FNIEMOP_DEF(iemOp_loope_Jb)
11000{
11001 IEMOP_MNEMONIC("loope Jb");
11002 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
11003 IEMOP_HLP_NO_LOCK_PREFIX();
11004 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
11005
11006 switch (pIemCpu->enmEffAddrMode)
11007 {
11008 case IEMMODE_16BIT:
11009 IEM_MC_BEGIN(0,0);
11010 IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
11011 IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
11012 IEM_MC_REL_JMP_S8(i8Imm);
11013 } IEM_MC_ELSE() {
11014 IEM_MC_ADVANCE_RIP();
11015 } IEM_MC_ENDIF();
11016 IEM_MC_END();
11017 return VINF_SUCCESS;
11018
11019 case IEMMODE_32BIT:
11020 IEM_MC_BEGIN(0,0);
11021 IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
11022 IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
11023 IEM_MC_REL_JMP_S8(i8Imm);
11024 } IEM_MC_ELSE() {
11025 IEM_MC_ADVANCE_RIP();
11026 } IEM_MC_ENDIF();
11027 IEM_MC_END();
11028 return VINF_SUCCESS;
11029
11030 case IEMMODE_64BIT:
11031 IEM_MC_BEGIN(0,0);
11032 IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
11033 IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
11034 IEM_MC_REL_JMP_S8(i8Imm);
11035 } IEM_MC_ELSE() {
11036 IEM_MC_ADVANCE_RIP();
11037 } IEM_MC_ENDIF();
11038 IEM_MC_END();
11039 return VINF_SUCCESS;
11040
11041 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11042 }
11043}
11044
11045
11046/** Opcode 0xe2. */
11047FNIEMOP_DEF(iemOp_loop_Jb)
11048{
11049 IEMOP_MNEMONIC("loop Jb");
11050 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
11051 IEMOP_HLP_NO_LOCK_PREFIX();
11052 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
11053
11054 /** @todo Check out the #GP case if EIP < CS.Base or EIP > CS.Limit when
11055 * using the 32-bit operand size override. How can that be restarted? See
11056     * the weird pseudo code in the Intel manual. */
11057 switch (pIemCpu->enmEffAddrMode)
11058 {
11059 case IEMMODE_16BIT:
11060 IEM_MC_BEGIN(0,0);
11061 IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
11062 IEM_MC_IF_CX_IS_NZ() {
11063 IEM_MC_REL_JMP_S8(i8Imm);
11064 } IEM_MC_ELSE() {
11065 IEM_MC_ADVANCE_RIP();
11066 } IEM_MC_ENDIF();
11067 IEM_MC_END();
11068 return VINF_SUCCESS;
11069
11070 case IEMMODE_32BIT:
11071 IEM_MC_BEGIN(0,0);
11072 IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
11073 IEM_MC_IF_ECX_IS_NZ() {
11074 IEM_MC_REL_JMP_S8(i8Imm);
11075 } IEM_MC_ELSE() {
11076 IEM_MC_ADVANCE_RIP();
11077 } IEM_MC_ENDIF();
11078 IEM_MC_END();
11079 return VINF_SUCCESS;
11080
11081 case IEMMODE_64BIT:
11082 IEM_MC_BEGIN(0,0);
11083 IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
11084 IEM_MC_IF_RCX_IS_NZ() {
11085 IEM_MC_REL_JMP_S8(i8Imm);
11086 } IEM_MC_ELSE() {
11087 IEM_MC_ADVANCE_RIP();
11088 } IEM_MC_ENDIF();
11089 IEM_MC_END();
11090 return VINF_SUCCESS;
11091
11092 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11093 }
11094}
11095
11096
11097/** Opcode 0xe3. */
11098FNIEMOP_DEF(iemOp_jecxz_Jb)
11099{
11100 IEMOP_MNEMONIC("jecxz Jb");
11101 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
11102 IEMOP_HLP_NO_LOCK_PREFIX();
11103 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
11104
11105 switch (pIemCpu->enmEffAddrMode)
11106 {
11107 case IEMMODE_16BIT:
11108 IEM_MC_BEGIN(0,0);
11109 IEM_MC_IF_CX_IS_NZ() {
11110 IEM_MC_ADVANCE_RIP();
11111 } IEM_MC_ELSE() {
11112 IEM_MC_REL_JMP_S8(i8Imm);
11113 } IEM_MC_ENDIF();
11114 IEM_MC_END();
11115 return VINF_SUCCESS;
11116
11117 case IEMMODE_32BIT:
11118 IEM_MC_BEGIN(0,0);
11119 IEM_MC_IF_ECX_IS_NZ() {
11120 IEM_MC_ADVANCE_RIP();
11121 } IEM_MC_ELSE() {
11122 IEM_MC_REL_JMP_S8(i8Imm);
11123 } IEM_MC_ENDIF();
11124 IEM_MC_END();
11125 return VINF_SUCCESS;
11126
11127 case IEMMODE_64BIT:
11128 IEM_MC_BEGIN(0,0);
11129 IEM_MC_IF_RCX_IS_NZ() {
11130 IEM_MC_ADVANCE_RIP();
11131 } IEM_MC_ELSE() {
11132 IEM_MC_REL_JMP_S8(i8Imm);
11133 } IEM_MC_ENDIF();
11134 IEM_MC_END();
11135 return VINF_SUCCESS;
11136
11137 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11138 }
11139}
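
/*
 * Illustrative sketch (not part of IEM, compiled out): unlike the LOOP family
 * above, JECXZ only tests the counter and never decrements it.  uEipNext is a
 * stand-in for the address of the following instruction.
 */
#if 0
static uint32_t iemExampleJecxz32(uint32_t uEcx, uint32_t uEipNext, int8_t i8Disp)
{
    return uEcx == 0 ? uEipNext + (uint32_t)(int32_t)i8Disp : uEipNext;
}
#endif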
11140
11141
11142/** Opcode 0xe4. */
11143FNIEMOP_DEF(iemOp_in_AL_Ib)
11144{
11145    IEMOP_MNEMONIC("in AL,Ib");
11146 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
11147 IEMOP_HLP_NO_LOCK_PREFIX();
11148 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, 1);
11149}
11150
11151
11152/** Opcode 0xe5. */
11153FNIEMOP_DEF(iemOp_in_eAX_Ib)
11154{
11155 IEMOP_MNEMONIC("in eAX,Ib");
11156 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
11157 IEMOP_HLP_NO_LOCK_PREFIX();
11158 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
11159}
11160
11161
11162/** Opcode 0xe6. */
11163FNIEMOP_DEF(iemOp_out_Ib_AL)
11164{
11165 IEMOP_MNEMONIC("out Ib,AL");
11166 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
11167 IEMOP_HLP_NO_LOCK_PREFIX();
11168 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, 1);
11169}
11170
11171
11172/** Opcode 0xe7. */
11173FNIEMOP_DEF(iemOp_out_Ib_eAX)
11174{
11175 IEMOP_MNEMONIC("out Ib,eAX");
11176 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
11177 IEMOP_HLP_NO_LOCK_PREFIX();
11178 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
11179}
11180
11181
11182/** Opcode 0xe8. */
11183FNIEMOP_DEF(iemOp_call_Jv)
11184{
11185 IEMOP_MNEMONIC("call Jv");
11186 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
11187 switch (pIemCpu->enmEffOpSize)
11188 {
11189 case IEMMODE_16BIT:
11190 {
11191 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
11192 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_16, (int16_t)u16Imm);
11193 }
11194
11195 case IEMMODE_32BIT:
11196 {
11197 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
11198 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_32, (int32_t)u32Imm);
11199 }
11200
11201 case IEMMODE_64BIT:
11202 {
11203 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
11204 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_64, u64Imm);
11205 }
11206
11207 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11208 }
11209}
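
/*
 * Illustrative sketch (not part of IEM, compiled out): what the 64-bit leg
 * above defers to iemCImpl_call_rel_64 - push the return address, then add
 * the sign-extended 32-bit displacement to RIP.  uRipNext and pfnPushU64 are
 * stand-ins for the decoder state and the guest stack write.
 */
#if 0
static uint64_t iemExampleCallRel64(uint64_t uRipNext, int32_t i32Disp, void (*pfnPushU64)(uint64_t))
{
    pfnPushU64(uRipNext);               /* return address = the next instruction */
    return uRipNext + (int64_t)i32Disp; /* the new RIP */
}
#endif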
11210
11211
11212/** Opcode 0xe9. */
11213FNIEMOP_DEF(iemOp_jmp_Jv)
11214{
11215 IEMOP_MNEMONIC("jmp Jv");
11216 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
11217 switch (pIemCpu->enmEffOpSize)
11218 {
11219 case IEMMODE_16BIT:
11220 {
11221 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
11222 IEM_MC_BEGIN(0, 0);
11223 IEM_MC_REL_JMP_S16(i16Imm);
11224 IEM_MC_END();
11225 return VINF_SUCCESS;
11226 }
11227
11228 case IEMMODE_64BIT:
11229 case IEMMODE_32BIT:
11230 {
11231 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
11232 IEM_MC_BEGIN(0, 0);
11233 IEM_MC_REL_JMP_S32(i32Imm);
11234 IEM_MC_END();
11235 return VINF_SUCCESS;
11236 }
11237
11238 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11239 }
11240}
11241
11242
11243/** Opcode 0xea. */
11244FNIEMOP_DEF(iemOp_jmp_Ap)
11245{
11246 IEMOP_MNEMONIC("jmp Ap");
11247 IEMOP_HLP_NO_64BIT();
11248
11249    /* Decode the far pointer address and pass it on to the far jump C implementation. */
11250 uint32_t offSeg;
11251 if (pIemCpu->enmEffOpSize != IEMMODE_16BIT)
11252 IEM_OPCODE_GET_NEXT_U32(&offSeg);
11253 else
11254 IEM_OPCODE_GET_NEXT_U16_ZX_U32(&offSeg);
11255 uint16_t uSel; IEM_OPCODE_GET_NEXT_U16(&uSel);
11256 IEMOP_HLP_NO_LOCK_PREFIX();
11257 return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_FarJmp, uSel, offSeg, pIemCpu->enmEffOpSize);
11258}
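
/*
 * Illustrative sketch (not part of IEM, compiled out): the ptr16:32 immediate
 * decoded above comes offset first, selector last in the instruction stream.
 * pb stands in for the raw opcode bytes following 0xea.
 */
#if 0
static void iemExampleDecodeFarPtr32(uint8_t const *pb, uint32_t *poffSeg, uint16_t *puSel)
{
    *poffSeg = (uint32_t)pb[0]
             | (uint32_t)pb[1] << 8
             | (uint32_t)pb[2] << 16
             | (uint32_t)pb[3] << 24;
    *puSel   = (uint16_t)(pb[4] | (uint16_t)pb[5] << 8);
}
#endif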
11259
11260
11261/** Opcode 0xeb. */
11262FNIEMOP_DEF(iemOp_jmp_Jb)
11263{
11264 IEMOP_MNEMONIC("jmp Jb");
11265 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
11266 IEMOP_HLP_NO_LOCK_PREFIX();
11267 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
11268
11269 IEM_MC_BEGIN(0, 0);
11270 IEM_MC_REL_JMP_S8(i8Imm);
11271 IEM_MC_END();
11272 return VINF_SUCCESS;
11273}
11274
11275
11276/** Opcode 0xec. */
11277FNIEMOP_DEF(iemOp_in_AL_DX)
11278{
11279 IEMOP_MNEMONIC("in AL,DX");
11280 IEMOP_HLP_NO_LOCK_PREFIX();
11281 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, 1);
11282}
11283
11284
11285/** Opcode 0xed. */
11286FNIEMOP_DEF(iemOp_in_eAX_DX)
11287{
11288 IEMOP_MNEMONIC("in eAX,DX");
11289 IEMOP_HLP_NO_LOCK_PREFIX();
11290 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
11291}
11292
11293
11294/** Opcode 0xee. */
11295FNIEMOP_DEF(iemOp_out_DX_AL)
11296{
11297 IEMOP_MNEMONIC("out DX,AL");
11298 IEMOP_HLP_NO_LOCK_PREFIX();
11299 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, 1);
11300}
11301
11302
11303/** Opcode 0xef. */
11304FNIEMOP_DEF(iemOp_out_DX_eAX)
11305{
11306 IEMOP_MNEMONIC("out DX,eAX");
11307 IEMOP_HLP_NO_LOCK_PREFIX();
11308 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
11309}
11310
11311
11312/** Opcode 0xf0. */
11313FNIEMOP_DEF(iemOp_lock)
11314{
11315 pIemCpu->fPrefixes |= IEM_OP_PRF_LOCK;
11316
11317 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
11318 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
11319}
11320
11321
11322/** Opcode 0xf2. */
11323FNIEMOP_DEF(iemOp_repne)
11324{
11325 /* This overrides any previous REPE prefix. */
11326 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPZ;
11327 pIemCpu->fPrefixes |= IEM_OP_PRF_REPNZ;
11328
11329 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
11330 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
11331}
11332
11333
11334/** Opcode 0xf3. */
11335FNIEMOP_DEF(iemOp_repe)
11336{
11337 /* This overrides any previous REPNE prefix. */
11338 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPNZ;
11339 pIemCpu->fPrefixes |= IEM_OP_PRF_REPZ;
11340
11341 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
11342 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
11343}
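
/*
 * Illustrative sketch (not part of IEM, compiled out): the prefix bookkeeping
 * done by iemOp_repne/iemOp_repe in isolation - 0xf2 and 0xf3 are mutually
 * exclusive and the last one decoded wins.  The bit values are stand-ins for
 * IEM_OP_PRF_REPNZ and IEM_OP_PRF_REPZ.
 */
#if 0
static uint32_t iemExampleApplyRepPrefix(uint32_t fPrefixes, uint8_t bPrefix)
{
    uint32_t const fRepNz = 1u << 0, fRepZ = 1u << 1; /* stand-in bits */
    if (bPrefix == 0xf2)
        return (fPrefixes & ~fRepZ) | fRepNz;
    if (bPrefix == 0xf3)
        return (fPrefixes & ~fRepNz) | fRepZ;
    return fPrefixes;
}
#endif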
11344
11345
11346/** Opcode 0xf4. */
11347FNIEMOP_DEF(iemOp_hlt)
11348{
11349 IEMOP_HLP_NO_LOCK_PREFIX();
11350 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
11351}
11352
11353
11354/** Opcode 0xf5. */
11355FNIEMOP_DEF(iemOp_cmc)
11356{
11357 IEMOP_MNEMONIC("cmc");
11358 IEMOP_HLP_NO_LOCK_PREFIX();
11359 IEM_MC_BEGIN(0, 0);
11360 IEM_MC_FLIP_EFL_BIT(X86_EFL_CF);
11361 IEM_MC_ADVANCE_RIP();
11362 IEM_MC_END();
11363 return VINF_SUCCESS;
11364}
11365
11366
11367/**
11368 * Common implementation of 'inc/dec/not/neg Eb'.
11369 *
11370 * @param bRm The RM byte.
11371 * @param pImpl The instruction implementation.
11372 */
11373FNIEMOP_DEF_2(iemOpCommonUnaryEb, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
11374{
11375 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11376 {
11377 /* register access */
11378 IEM_MC_BEGIN(2, 0);
11379 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
11380 IEM_MC_ARG(uint32_t *, pEFlags, 1);
11381 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11382 IEM_MC_REF_EFLAGS(pEFlags);
11383 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
11384 IEM_MC_ADVANCE_RIP();
11385 IEM_MC_END();
11386 }
11387 else
11388 {
11389 /* memory access. */
11390 IEM_MC_BEGIN(2, 2);
11391 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
11392 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
11393 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
11394
11395 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
11396 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
11397 IEM_MC_FETCH_EFLAGS(EFlags);
11398 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
11399 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
11400 else
11401 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU8, pu8Dst, pEFlags);
11402
11403 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
11404 IEM_MC_COMMIT_EFLAGS(EFlags);
11405 IEM_MC_ADVANCE_RIP();
11406 IEM_MC_END();
11407 }
11408 return VINF_SUCCESS;
11409}
11410
11411
11412/**
11413 * Common implementation of 'inc/dec/not/neg Ev'.
11414 *
11415 * @param bRm The RM byte.
11416 * @param pImpl The instruction implementation.
11417 */
11418FNIEMOP_DEF_2(iemOpCommonUnaryEv, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
11419{
11420 /* Registers are handled by a common worker. */
11421 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11422 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, pImpl, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11423
11424 /* Memory we do here. */
11425 switch (pIemCpu->enmEffOpSize)
11426 {
11427 case IEMMODE_16BIT:
11428 IEM_MC_BEGIN(2, 2);
11429 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
11430 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
11431 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
11432
11433 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
11434 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
11435 IEM_MC_FETCH_EFLAGS(EFlags);
11436 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
11437 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
11438 else
11439 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU16, pu16Dst, pEFlags);
11440
11441 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
11442 IEM_MC_COMMIT_EFLAGS(EFlags);
11443 IEM_MC_ADVANCE_RIP();
11444 IEM_MC_END();
11445 return VINF_SUCCESS;
11446
11447 case IEMMODE_32BIT:
11448 IEM_MC_BEGIN(2, 2);
11449 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
11450 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
11451 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
11452
11453 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
11454 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
11455 IEM_MC_FETCH_EFLAGS(EFlags);
11456 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
11457 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
11458 else
11459 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU32, pu32Dst, pEFlags);
11460
11461 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
11462 IEM_MC_COMMIT_EFLAGS(EFlags);
11463 IEM_MC_ADVANCE_RIP();
11464 IEM_MC_END();
11465 return VINF_SUCCESS;
11466
11467 case IEMMODE_64BIT:
11468 IEM_MC_BEGIN(2, 2);
11469 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
11470 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
11471 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
11472
11473 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
11474 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
11475 IEM_MC_FETCH_EFLAGS(EFlags);
11476 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
11477 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
11478 else
11479 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU64, pu64Dst, pEFlags);
11480
11481 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
11482 IEM_MC_COMMIT_EFLAGS(EFlags);
11483 IEM_MC_ADVANCE_RIP();
11484 IEM_MC_END();
11485 return VINF_SUCCESS;
11486
11487 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11488 }
11489}
11490
11491
11492/** Opcode 0xf6 /0. */
11493FNIEMOP_DEF_1(iemOp_grp3_test_Eb, uint8_t, bRm)
11494{
11495 IEMOP_MNEMONIC("test Eb,Ib");
11496 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
11497
11498 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11499 {
11500 /* register access */
11501 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
11502 IEMOP_HLP_NO_LOCK_PREFIX();
11503
11504 IEM_MC_BEGIN(3, 0);
11505 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
11506 IEM_MC_ARG_CONST(uint8_t, u8Src,/*=*/u8Imm, 1);
11507 IEM_MC_ARG(uint32_t *, pEFlags, 2);
11508 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11509 IEM_MC_REF_EFLAGS(pEFlags);
11510 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);
11511 IEM_MC_ADVANCE_RIP();
11512 IEM_MC_END();
11513 }
11514 else
11515 {
11516 /* memory access. */
11517 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
11518
11519 IEM_MC_BEGIN(3, 2);
11520 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
11521 IEM_MC_ARG(uint8_t, u8Src, 1);
11522 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
11523 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
11524
11525 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
11526 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
11527 IEM_MC_ASSIGN(u8Src, u8Imm);
11528 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
11529 IEM_MC_FETCH_EFLAGS(EFlags);
11530 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);
11531
11532 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_R);
11533 IEM_MC_COMMIT_EFLAGS(EFlags);
11534 IEM_MC_ADVANCE_RIP();
11535 IEM_MC_END();
11536 }
11537 return VINF_SUCCESS;
11538}
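
/*
 * Illustrative sketch (not part of IEM, compiled out): what the
 * iemAImpl_test_u8 worker invoked above computes - an AND that is only used
 * to update the flags.  CF and OF are cleared, SF/ZF/PF reflect the result,
 * and AF is left as-is (architecturally undefined).  Bit positions follow
 * the EFLAGS layout (CF=0, PF=2, ZF=6, SF=7, OF=11).
 */
#if 0
static uint32_t iemExampleTestU8(uint8_t u8Dst, uint8_t u8Src, uint32_t fEfl)
{
    uint8_t const u8Res = u8Dst & u8Src;
    fEfl &= ~((1u << 0) | (1u << 2) | (1u << 6) | (1u << 7) | (1u << 11));
    if (!u8Res)
        fEfl |= 1u << 6;                        /* ZF */
    if (u8Res & 0x80)
        fEfl |= 1u << 7;                        /* SF */
    unsigned cBits = 0;
    for (uint8_t b = u8Res; b; b &= (uint8_t)(b - 1))
        cBits++;
    if (!(cBits & 1))
        fEfl |= 1u << 2;                        /* PF - even parity */
    return fEfl;
}
#endif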
11539
11540
11541/** Opcode 0xf7 /0. */
11542FNIEMOP_DEF_1(iemOp_grp3_test_Ev, uint8_t, bRm)
11543{
11544 IEMOP_MNEMONIC("test Ev,Iv");
11545 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
11546 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
11547
11548 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11549 {
11550 /* register access */
11551 switch (pIemCpu->enmEffOpSize)
11552 {
11553 case IEMMODE_16BIT:
11554 {
11555 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
11556 IEM_MC_BEGIN(3, 0);
11557 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
11558 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/u16Imm, 1);
11559 IEM_MC_ARG(uint32_t *, pEFlags, 2);
11560 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11561 IEM_MC_REF_EFLAGS(pEFlags);
11562 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);
11563 IEM_MC_ADVANCE_RIP();
11564 IEM_MC_END();
11565 return VINF_SUCCESS;
11566 }
11567
11568 case IEMMODE_32BIT:
11569 {
11570 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
11571 IEM_MC_BEGIN(3, 0);
11572 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
11573 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/u32Imm, 1);
11574 IEM_MC_ARG(uint32_t *, pEFlags, 2);
11575 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11576 IEM_MC_REF_EFLAGS(pEFlags);
11577 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);
11578 IEM_MC_ADVANCE_RIP();
11579 IEM_MC_END();
11580 return VINF_SUCCESS;
11581 }
11582
11583 case IEMMODE_64BIT:
11584 {
11585 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
11586 IEM_MC_BEGIN(3, 0);
11587 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
11588 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/u64Imm, 1);
11589 IEM_MC_ARG(uint32_t *, pEFlags, 2);
11590 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11591 IEM_MC_REF_EFLAGS(pEFlags);
11592 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);
11593 IEM_MC_ADVANCE_RIP();
11594 IEM_MC_END();
11595 return VINF_SUCCESS;
11596 }
11597
11598 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11599 }
11600 }
11601 else
11602 {
11603 /* memory access. */
11604 switch (pIemCpu->enmEffOpSize)
11605 {
11606 case IEMMODE_16BIT:
11607 {
11608 IEM_MC_BEGIN(3, 2);
11609 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
11610 IEM_MC_ARG(uint16_t, u16Src, 1);
11611 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
11612 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
11613
11614 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
11615 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
11616 IEM_MC_ASSIGN(u16Src, u16Imm);
11617 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
11618 IEM_MC_FETCH_EFLAGS(EFlags);
11619 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);
11620
11621 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_R);
11622 IEM_MC_COMMIT_EFLAGS(EFlags);
11623 IEM_MC_ADVANCE_RIP();
11624 IEM_MC_END();
11625 return VINF_SUCCESS;
11626 }
11627
11628 case IEMMODE_32BIT:
11629 {
11630 IEM_MC_BEGIN(3, 2);
11631 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
11632 IEM_MC_ARG(uint32_t, u32Src, 1);
11633 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
11634 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
11635
11636 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
11637 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
11638 IEM_MC_ASSIGN(u32Src, u32Imm);
11639 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
11640 IEM_MC_FETCH_EFLAGS(EFlags);
11641 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);
11642
11643 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_R);
11644 IEM_MC_COMMIT_EFLAGS(EFlags);
11645 IEM_MC_ADVANCE_RIP();
11646 IEM_MC_END();
11647 return VINF_SUCCESS;
11648 }
11649
11650 case IEMMODE_64BIT:
11651 {
11652 IEM_MC_BEGIN(3, 2);
11653 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
11654 IEM_MC_ARG(uint64_t, u64Src, 1);
11655 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
11656 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
11657
11658 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
11659 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
11660 IEM_MC_ASSIGN(u64Src, u64Imm);
11661 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
11662 IEM_MC_FETCH_EFLAGS(EFlags);
11663 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);
11664
11665 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_R);
11666 IEM_MC_COMMIT_EFLAGS(EFlags);
11667 IEM_MC_ADVANCE_RIP();
11668 IEM_MC_END();
11669 return VINF_SUCCESS;
11670 }
11671
11672 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11673 }
11674 }
11675}
11676
11677
11678/** Opcode 0xf6 /4, /5, /6 and /7. */
11679FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEb, uint8_t, bRm, PFNIEMAIMPLMULDIVU8, pfnU8)
11680{
11681 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
11682
11683 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11684 {
11685 /* register access */
11686 IEMOP_HLP_NO_LOCK_PREFIX();
11687 IEM_MC_BEGIN(3, 0);
11688 IEM_MC_ARG(uint16_t *, pu16AX, 0);
11689 IEM_MC_ARG(uint8_t, u8Value, 1);
11690 IEM_MC_ARG(uint32_t *, pEFlags, 2);
11691 IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11692 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
11693 IEM_MC_REF_EFLAGS(pEFlags);
11694 IEM_MC_CALL_VOID_AIMPL_3(pfnU8, pu16AX, u8Value, pEFlags);
11695 IEM_MC_ADVANCE_RIP();
11696 IEM_MC_END();
11697 }
11698 else
11699 {
11700 /* memory access. */
11701 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
11702
11703 IEM_MC_BEGIN(3, 1);
11704 IEM_MC_ARG(uint16_t *, pu16AX, 0);
11705 IEM_MC_ARG(uint8_t, u8Value, 1);
11706 IEM_MC_ARG(uint32_t *, pEFlags, 2);
11707 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
11708
11709 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
11710 IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
11711 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
11712 IEM_MC_REF_EFLAGS(pEFlags);
11713 IEM_MC_CALL_VOID_AIMPL_3(pfnU8, pu16AX, u8Value, pEFlags);
11714
11715 IEM_MC_ADVANCE_RIP();
11716 IEM_MC_END();
11717 }
11718 return VINF_SUCCESS;
11719}
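
/*
 * Illustrative sketch (not part of IEM, compiled out): the byte divide an
 * iemAImpl_div_u8 style worker performs on the AX pair - quotient to AL,
 * remainder to AH, with a non-zero return standing in for raising #DE on
 * divide-by-zero or quotient overflow (compare the rc handling in the Ev
 * worker below).
 */
#if 0
static int iemExampleDivU8(uint16_t *pu16AX, uint8_t u8Divisor)
{
    if (!u8Divisor)
        return -1;                                       /* #DE: divide by zero */
    uint16_t const uQuotient  = *pu16AX / u8Divisor;
    uint16_t const uRemainder = *pu16AX % u8Divisor;
    if (uQuotient > 0xff)
        return -1;                                       /* #DE: quotient overflow */
    *pu16AX = (uint16_t)((uRemainder << 8) | uQuotient); /* AH:AL */
    return 0;
}
#endif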
11720
11721
11722/** Opcode 0xf7 /4, /5, /6 and /7. */
11723FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEv, uint8_t, bRm, PCIEMOPMULDIVSIZES, pImpl)
11724{
11725 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
11726 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
11727
11728 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11729 {
11730 /* register access */
11731 switch (pIemCpu->enmEffOpSize)
11732 {
11733 case IEMMODE_16BIT:
11734 {
11735 IEMOP_HLP_NO_LOCK_PREFIX();
11736 IEM_MC_BEGIN(4, 1);
11737 IEM_MC_ARG(uint16_t *, pu16AX, 0);
11738 IEM_MC_ARG(uint16_t *, pu16DX, 1);
11739 IEM_MC_ARG(uint16_t, u16Value, 2);
11740 IEM_MC_ARG(uint32_t *, pEFlags, 3);
11741 IEM_MC_LOCAL(int32_t, rc);
11742
11743 IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11744 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
11745 IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
11746 IEM_MC_REF_EFLAGS(pEFlags);
11747 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
11748 IEM_MC_IF_LOCAL_IS_Z(rc) {
11749 IEM_MC_ADVANCE_RIP();
11750 } IEM_MC_ELSE() {
11751 IEM_MC_RAISE_DIVIDE_ERROR();
11752 } IEM_MC_ENDIF();
11753
11754 IEM_MC_END();
11755 return VINF_SUCCESS;
11756 }
11757
11758 case IEMMODE_32BIT:
11759 {
11760 IEMOP_HLP_NO_LOCK_PREFIX();
11761 IEM_MC_BEGIN(4, 1);
11762 IEM_MC_ARG(uint32_t *, pu32AX, 0);
11763 IEM_MC_ARG(uint32_t *, pu32DX, 1);
11764 IEM_MC_ARG(uint32_t, u32Value, 2);
11765 IEM_MC_ARG(uint32_t *, pEFlags, 3);
11766 IEM_MC_LOCAL(int32_t, rc);
11767
11768 IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11769 IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
11770 IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
11771 IEM_MC_REF_EFLAGS(pEFlags);
11772 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
11773 IEM_MC_IF_LOCAL_IS_Z(rc) {
11774 IEM_MC_ADVANCE_RIP();
11775 } IEM_MC_ELSE() {
11776 IEM_MC_RAISE_DIVIDE_ERROR();
11777 } IEM_MC_ENDIF();
11778
11779 IEM_MC_END();
11780 return VINF_SUCCESS;
11781 }
11782
11783 case IEMMODE_64BIT:
11784 {
11785 IEMOP_HLP_NO_LOCK_PREFIX();
11786 IEM_MC_BEGIN(4, 1);
11787 IEM_MC_ARG(uint64_t *, pu64AX, 0);
11788 IEM_MC_ARG(uint64_t *, pu64DX, 1);
11789 IEM_MC_ARG(uint64_t, u64Value, 2);
11790 IEM_MC_ARG(uint32_t *, pEFlags, 3);
11791 IEM_MC_LOCAL(int32_t, rc);
11792
11793 IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11794 IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
11795 IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
11796 IEM_MC_REF_EFLAGS(pEFlags);
11797 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
11798 IEM_MC_IF_LOCAL_IS_Z(rc) {
11799 IEM_MC_ADVANCE_RIP();
11800 } IEM_MC_ELSE() {
11801 IEM_MC_RAISE_DIVIDE_ERROR();
11802 } IEM_MC_ENDIF();
11803
11804 IEM_MC_END();
11805 return VINF_SUCCESS;
11806 }
11807
11808 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11809 }
11810 }
11811 else
11812 {
11813 /* memory access. */
11814 switch (pIemCpu->enmEffOpSize)
11815 {
11816 case IEMMODE_16BIT:
11817 {
11818 IEMOP_HLP_NO_LOCK_PREFIX();
11819 IEM_MC_BEGIN(4, 2);
11820 IEM_MC_ARG(uint16_t *, pu16AX, 0);
11821 IEM_MC_ARG(uint16_t *, pu16DX, 1);
11822 IEM_MC_ARG(uint16_t, u16Value, 2);
11823 IEM_MC_ARG(uint32_t *, pEFlags, 3);
11824 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
11825 IEM_MC_LOCAL(int32_t, rc);
11826
11827 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
11828 IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
11829 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
11830 IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
11831 IEM_MC_REF_EFLAGS(pEFlags);
11832 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
11833 IEM_MC_IF_LOCAL_IS_Z(rc) {
11834 IEM_MC_ADVANCE_RIP();
11835 } IEM_MC_ELSE() {
11836 IEM_MC_RAISE_DIVIDE_ERROR();
11837 } IEM_MC_ENDIF();
11838
11839 IEM_MC_END();
11840 return VINF_SUCCESS;
11841 }
11842
11843 case IEMMODE_32BIT:
11844 {
11845 IEMOP_HLP_NO_LOCK_PREFIX();
11846 IEM_MC_BEGIN(4, 2);
11847 IEM_MC_ARG(uint32_t *, pu32AX, 0);
11848 IEM_MC_ARG(uint32_t *, pu32DX, 1);
11849 IEM_MC_ARG(uint32_t, u32Value, 2);
11850 IEM_MC_ARG(uint32_t *, pEFlags, 3);
11851 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
11852 IEM_MC_LOCAL(int32_t, rc);
11853
11854 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
11855 IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
11856 IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
11857 IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
11858 IEM_MC_REF_EFLAGS(pEFlags);
11859 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
11860 IEM_MC_IF_LOCAL_IS_Z(rc) {
11861 IEM_MC_ADVANCE_RIP();
11862 } IEM_MC_ELSE() {
11863 IEM_MC_RAISE_DIVIDE_ERROR();
11864 } IEM_MC_ENDIF();
11865
11866 IEM_MC_END();
11867 return VINF_SUCCESS;
11868 }
11869
11870 case IEMMODE_64BIT:
11871 {
11872 IEMOP_HLP_NO_LOCK_PREFIX();
11873 IEM_MC_BEGIN(4, 2);
11874 IEM_MC_ARG(uint64_t *, pu64AX, 0);
11875 IEM_MC_ARG(uint64_t *, pu64DX, 1);
11876 IEM_MC_ARG(uint64_t, u64Value, 2);
11877 IEM_MC_ARG(uint32_t *, pEFlags, 3);
11878 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
11879 IEM_MC_LOCAL(int32_t, rc);
11880
11881 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
11882 IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
11883 IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
11884 IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
11885 IEM_MC_REF_EFLAGS(pEFlags);
11886 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
11887 IEM_MC_IF_LOCAL_IS_Z(rc) {
11888 IEM_MC_ADVANCE_RIP();
11889 } IEM_MC_ELSE() {
11890 IEM_MC_RAISE_DIVIDE_ERROR();
11891 } IEM_MC_ENDIF();
11892
11893 IEM_MC_END();
11894 return VINF_SUCCESS;
11895 }
11896
11897 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11898 }
11899 }
11900}
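
/*
 * Illustrative sketch (not part of IEM, compiled out): the widening 16-bit
 * multiply behind a g_iemAImpl_mul style worker - DX:AX = AX * operand, with
 * CF and OF (bits 0 and 11) set when the high half is non-zero.  MUL never
 * raises #DE, so the rc checked above would always be zero here.
 */
#if 0
static int iemExampleMulU16(uint16_t *pu16AX, uint16_t *pu16DX, uint16_t u16Src, uint32_t *pfEfl)
{
    uint32_t const uResult = (uint32_t)*pu16AX * u16Src;
    *pu16AX = (uint16_t)uResult;
    *pu16DX = (uint16_t)(uResult >> 16);
    if (*pu16DX)
        *pfEfl |= (1u << 0) | (1u << 11);                /* CF | OF */
    else
        *pfEfl &= ~((1u << 0) | (1u << 11));
    return 0;
}
#endif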
11901
11902/** Opcode 0xf6. */
11903FNIEMOP_DEF(iemOp_Grp3_Eb)
11904{
11905 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
11906 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
11907 {
11908 case 0:
11909 return FNIEMOP_CALL_1(iemOp_grp3_test_Eb, bRm);
11910 case 1:
11911 return IEMOP_RAISE_INVALID_LOCK_PREFIX();
11912 case 2:
11913 IEMOP_MNEMONIC("not Eb");
11914 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_not);
11915 case 3:
11916 IEMOP_MNEMONIC("neg Eb");
11917 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_neg);
11918 case 4:
11919 IEMOP_MNEMONIC("mul Eb");
11920 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
11921 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_mul_u8);
11922 case 5:
11923 IEMOP_MNEMONIC("imul Eb");
11924 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
11925 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_imul_u8);
11926 case 6:
11927 IEMOP_MNEMONIC("div Eb");
11928 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
11929 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_div_u8);
11930 case 7:
11931 IEMOP_MNEMONIC("idiv Eb");
11932 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
11933 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_idiv_u8);
11934 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11935 }
11936}
11937
11938
11939/** Opcode 0xf7. */
11940FNIEMOP_DEF(iemOp_Grp3_Ev)
11941{
11942 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
11943 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
11944 {
11945 case 0:
11946 return FNIEMOP_CALL_1(iemOp_grp3_test_Ev, bRm);
11947 case 1:
11948 return IEMOP_RAISE_INVALID_LOCK_PREFIX();
11949 case 2:
11950 IEMOP_MNEMONIC("not Ev");
11951 return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_not);
11952 case 3:
11953 IEMOP_MNEMONIC("neg Ev");
11954 return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_neg);
11955 case 4:
11956 IEMOP_MNEMONIC("mul Ev");
11957 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
11958 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_mul);
11959 case 5:
11960 IEMOP_MNEMONIC("imul Ev");
11961 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
11962 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_imul);
11963 case 6:
11964 IEMOP_MNEMONIC("div Ev");
11965 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
11966 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_div);
11967 case 7:
11968 IEMOP_MNEMONIC("idiv Ev");
11969 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
11970 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_idiv);
11971 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11972 }
11973}
11974
11975
11976/** Opcode 0xf8. */
11977FNIEMOP_DEF(iemOp_clc)
11978{
11979 IEMOP_MNEMONIC("clc");
11980 IEMOP_HLP_NO_LOCK_PREFIX();
11981 IEM_MC_BEGIN(0, 0);
11982 IEM_MC_CLEAR_EFL_BIT(X86_EFL_CF);
11983 IEM_MC_ADVANCE_RIP();
11984 IEM_MC_END();
11985 return VINF_SUCCESS;
11986}
11987
11988
11989/** Opcode 0xf9. */
11990FNIEMOP_DEF(iemOp_stc)
11991{
11992 IEMOP_MNEMONIC("stc");
11993 IEMOP_HLP_NO_LOCK_PREFIX();
11994 IEM_MC_BEGIN(0, 0);
11995 IEM_MC_SET_EFL_BIT(X86_EFL_CF);
11996 IEM_MC_ADVANCE_RIP();
11997 IEM_MC_END();
11998 return VINF_SUCCESS;
11999}
12000
12001
12002/** Opcode 0xfa. */
12003FNIEMOP_DEF(iemOp_cli)
12004{
12005 IEMOP_MNEMONIC("cli");
12006 IEMOP_HLP_NO_LOCK_PREFIX();
12007 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cli);
12008}
12009
12010
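/** Opcode 0xfb. */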
12011FNIEMOP_DEF(iemOp_sti)
12012{
12013 IEMOP_MNEMONIC("sti");
12014 IEMOP_HLP_NO_LOCK_PREFIX();
12015 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sti);
12016}
12017
12018
12019/** Opcode 0xfc. */
12020FNIEMOP_DEF(iemOp_cld)
12021{
12022 IEMOP_MNEMONIC("cld");
12023 IEMOP_HLP_NO_LOCK_PREFIX();
12024 IEM_MC_BEGIN(0, 0);
12025 IEM_MC_CLEAR_EFL_BIT(X86_EFL_DF);
12026 IEM_MC_ADVANCE_RIP();
12027 IEM_MC_END();
12028 return VINF_SUCCESS;
12029}
12030
12031
12032/** Opcode 0xfd. */
12033FNIEMOP_DEF(iemOp_std)
12034{
12035 IEMOP_MNEMONIC("std");
12036 IEMOP_HLP_NO_LOCK_PREFIX();
12037 IEM_MC_BEGIN(0, 0);
12038 IEM_MC_SET_EFL_BIT(X86_EFL_DF);
12039 IEM_MC_ADVANCE_RIP();
12040 IEM_MC_END();
12041 return VINF_SUCCESS;
12042}
12043
12044
12045/** Opcode 0xfe. */
12046FNIEMOP_DEF(iemOp_Grp4)
12047{
12048 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
12049 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
12050 {
12051 case 0:
12052            IEMOP_MNEMONIC("inc Eb");
12053 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_inc);
12054 case 1:
12055            IEMOP_MNEMONIC("dec Eb");
12056 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_dec);
12057 default:
12058 IEMOP_MNEMONIC("grp4-ud");
12059 return IEMOP_RAISE_INVALID_OPCODE();
12060 }
12061}
12062
12063
12064/**
12065 * Opcode 0xff /2.
12066 * @param bRm The RM byte.
12067 */
12068FNIEMOP_DEF_1(iemOp_Grp5_calln_Ev, uint8_t, bRm)
12069{
12070 IEMOP_MNEMONIC("calln Ev");
12071 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
12072 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
12073
12074 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
12075 {
12076 /* The new RIP is taken from a register. */
12077 switch (pIemCpu->enmEffOpSize)
12078 {
12079 case IEMMODE_16BIT:
12080 IEM_MC_BEGIN(1, 0);
12081 IEM_MC_ARG(uint16_t, u16Target, 0);
12082 IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
12083 IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
12084 IEM_MC_END()
12085 return VINF_SUCCESS;
12086
12087 case IEMMODE_32BIT:
12088 IEM_MC_BEGIN(1, 0);
12089 IEM_MC_ARG(uint32_t, u32Target, 0);
12090 IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
12091 IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
12092 IEM_MC_END()
12093 return VINF_SUCCESS;
12094
12095 case IEMMODE_64BIT:
12096 IEM_MC_BEGIN(1, 0);
12097 IEM_MC_ARG(uint64_t, u64Target, 0);
12098 IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
12099 IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
12100 IEM_MC_END()
12101 return VINF_SUCCESS;
12102
12103 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12104 }
12105 }
12106 else
12107 {
12108        /* The new RIP is taken from memory. */
12109 switch (pIemCpu->enmEffOpSize)
12110 {
12111 case IEMMODE_16BIT:
12112 IEM_MC_BEGIN(1, 1);
12113 IEM_MC_ARG(uint16_t, u16Target, 0);
12114 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
12115 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
12116 IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
12117 IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
12118 IEM_MC_END()
12119 return VINF_SUCCESS;
12120
12121 case IEMMODE_32BIT:
12122 IEM_MC_BEGIN(1, 1);
12123 IEM_MC_ARG(uint32_t, u32Target, 0);
12124 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
12125 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
12126 IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
12127 IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
12128 IEM_MC_END()
12129 return VINF_SUCCESS;
12130
12131 case IEMMODE_64BIT:
12132 IEM_MC_BEGIN(1, 1);
12133 IEM_MC_ARG(uint64_t, u64Target, 0);
12134 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
12135 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
12136 IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
12137 IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
12138 IEM_MC_END()
12139 return VINF_SUCCESS;
12140
12141 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12142 }
12143 }
12144}
12145
12146
12147/**
12148 * Opcode 0xff /3.
12149 * @param bRm The RM byte.
12150 */
12151FNIEMOP_DEF_1(iemOp_Grp5_callf_Ep, uint8_t, bRm)
12152{
12153 IEMOP_MNEMONIC("callf Ep");
12154 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
12155
12156 /* Registers? How?? */
12157 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
12158 {
12159 /** @todo How the heck does a 'callf eax' work? Probably just have to
12160 * search the docs... */
12161 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
12162 }
12163
12164 /* Far pointer loaded from memory. */
12165 switch (pIemCpu->enmEffOpSize)
12166 {
12167 case IEMMODE_16BIT:
12168 IEM_MC_BEGIN(3, 1);
12169 IEM_MC_ARG(uint16_t, u16Sel, 0);
12170 IEM_MC_ARG(uint16_t, offSeg, 1);
12171 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
12172 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
12173 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
12174 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
12175 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 2);
12176 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
12177 IEM_MC_END();
12178 return VINF_SUCCESS;
12179
12180 case IEMMODE_32BIT:
12181 IEM_MC_BEGIN(3, 1);
12182 IEM_MC_ARG(uint16_t, u16Sel, 0);
12183 IEM_MC_ARG(uint32_t, offSeg, 1);
12184 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_32BIT, 2);
12185 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
12186 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
12187 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
12188 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 4);
12189 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
12190 IEM_MC_END();
12191 return VINF_SUCCESS;
12192
12193 case IEMMODE_64BIT:
12194 IEM_MC_BEGIN(3, 1);
12195 IEM_MC_ARG(uint16_t, u16Sel, 0);
12196 IEM_MC_ARG(uint64_t, offSeg, 1);
12197            IEM_MC_ARG_CONST(IEMMODE,   enmEffOpSize, IEMMODE_64BIT, 2);
12198 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
12199 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
12200 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
12201 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 8);
12202 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
12203 IEM_MC_END();
12204 return VINF_SUCCESS;
12205
12206 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12207 }
12208}
12209
12210
12211/**
12212 * Opcode 0xff /4.
12213 * @param bRm The RM byte.
12214 */
12215FNIEMOP_DEF_1(iemOp_Grp5_jmpn_Ev, uint8_t, bRm)
12216{
12217 IEMOP_MNEMONIC("jmpn Ev");
12218 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
12219 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
12220
12221 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
12222 {
12223 /* The new RIP is taken from a register. */
12224 switch (pIemCpu->enmEffOpSize)
12225 {
12226 case IEMMODE_16BIT:
12227 IEM_MC_BEGIN(0, 1);
12228 IEM_MC_LOCAL(uint16_t, u16Target);
12229 IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
12230 IEM_MC_SET_RIP_U16(u16Target);
12231 IEM_MC_END()
12232 return VINF_SUCCESS;
12233
12234 case IEMMODE_32BIT:
12235 IEM_MC_BEGIN(0, 1);
12236 IEM_MC_LOCAL(uint32_t, u32Target);
12237 IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
12238 IEM_MC_SET_RIP_U32(u32Target);
12239 IEM_MC_END()
12240 return VINF_SUCCESS;
12241
12242 case IEMMODE_64BIT:
12243 IEM_MC_BEGIN(0, 1);
12244 IEM_MC_LOCAL(uint64_t, u64Target);
12245 IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
12246 IEM_MC_SET_RIP_U64(u64Target);
12247 IEM_MC_END()
12248 return VINF_SUCCESS;
12249
12250 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12251 }
12252 }
12253 else
12254 {
12255        /* The new RIP is taken from memory. */
12256 switch (pIemCpu->enmEffOpSize)
12257 {
12258 case IEMMODE_16BIT:
12259 IEM_MC_BEGIN(0, 2);
12260 IEM_MC_LOCAL(uint16_t, u16Target);
12261 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
12262 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
12263 IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
12264 IEM_MC_SET_RIP_U16(u16Target);
12265 IEM_MC_END()
12266 return VINF_SUCCESS;
12267
12268 case IEMMODE_32BIT:
12269 IEM_MC_BEGIN(0, 2);
12270 IEM_MC_LOCAL(uint32_t, u32Target);
12271 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
12272 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
12273 IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
12274 IEM_MC_SET_RIP_U32(u32Target);
12275 IEM_MC_END()
12276 return VINF_SUCCESS;
12277
12278 case IEMMODE_64BIT:
12279 IEM_MC_BEGIN(0, 2);
12280                IEM_MC_LOCAL(uint64_t, u64Target);
12281                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
12282                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
12283                IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
12284                IEM_MC_SET_RIP_U64(u64Target);
12285 IEM_MC_END()
12286 return VINF_SUCCESS;
12287
12288 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12289 }
12290 }
12291}
12292
12293
12294/**
12295 * Opcode 0xff /5.
12296 * @param bRm The RM byte.
12297 */
12298FNIEMOP_DEF_1(iemOp_Grp5_jmpf_Ep, uint8_t, bRm)
12299{
12300    IEMOP_MNEMONIC("jmpf Ep");
12301 IEMOP_HLP_NO_64BIT();
12302 /** @todo could share all the decoding with iemOp_Grp5_callf_Ep. */
12303
12304    /* Decode the far pointer address and pass it on to the far jump C
12305 implementation. */
12306 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
12307 {
12308        /** @todo How the heck does a 'jmpf eax' work? Probably just have to
12309 * search the docs... */
12310 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
12311 }
12312
12313 /* Far pointer loaded from memory. */
12314 switch (pIemCpu->enmEffOpSize)
12315 {
12316 case IEMMODE_16BIT:
12317 IEM_MC_BEGIN(3, 1);
12318 IEM_MC_ARG(uint16_t, u16Sel, 0);
12319 IEM_MC_ARG(uint16_t, offSeg, 1);
12320 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
12321 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
12322 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
12323 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
12324 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 2);
12325 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
12326 IEM_MC_END();
12327 return VINF_SUCCESS;
12328
12329 case IEMMODE_32BIT:
12330 IEM_MC_BEGIN(3, 1);
12331 IEM_MC_ARG(uint16_t, u16Sel, 0);
12332 IEM_MC_ARG(uint32_t, offSeg, 1);
12333 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_32BIT, 2);
12334 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
12335 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
12336 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
12337 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 4);
12338 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
12339 IEM_MC_END();
12340 return VINF_SUCCESS;
12341
12342 case IEMMODE_64BIT:
12343 IEM_MC_BEGIN(3, 1);
12344 IEM_MC_ARG(uint16_t, u16Sel, 0);
12345 IEM_MC_ARG(uint64_t, offSeg, 1);
12346            IEM_MC_ARG_CONST(IEMMODE,   enmEffOpSize, IEMMODE_64BIT, 2);
12347 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
12348 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
12349 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
12350 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 8);
12351 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
12352 IEM_MC_END();
12353 return VINF_SUCCESS;
12354
12355 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12356 }
12357}
12358
12359
12360/**
12361 * Opcode 0xff /6.
12362 * @param bRm The RM byte.
12363 */
12364FNIEMOP_DEF_1(iemOp_Grp5_push_Ev, uint8_t, bRm)
12365{
12366 IEMOP_MNEMONIC("push Ev");
12367 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
12368
12369 /* Registers are handled by a common worker. */
12370 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
12371 return FNIEMOP_CALL_1(iemOpCommonPushGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
12372
12373 /* Memory we do here. */
12374 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
12375 switch (pIemCpu->enmEffOpSize)
12376 {
12377 case IEMMODE_16BIT:
12378 IEM_MC_BEGIN(0, 2);
12379 IEM_MC_LOCAL(uint16_t, u16Src);
12380 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
12381 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
12382 IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffSrc);
12383 IEM_MC_PUSH_U16(u16Src);
12384 IEM_MC_ADVANCE_RIP();
12385 IEM_MC_END();
12386 return VINF_SUCCESS;
12387
12388 case IEMMODE_32BIT:
12389 IEM_MC_BEGIN(0, 2);
12390 IEM_MC_LOCAL(uint32_t, u32Src);
12391 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
12392 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
12393 IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffSrc);
12394 IEM_MC_PUSH_U32(u32Src);
12395 IEM_MC_ADVANCE_RIP();
12396 IEM_MC_END();
12397 return VINF_SUCCESS;
12398
12399 case IEMMODE_64BIT:
12400 IEM_MC_BEGIN(0, 2);
12401 IEM_MC_LOCAL(uint64_t, u64Src);
12402 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
12403 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
12404 IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffSrc);
12405 IEM_MC_PUSH_U64(u64Src);
12406 IEM_MC_ADVANCE_RIP();
12407 IEM_MC_END();
12408 return VINF_SUCCESS;
12409
12410 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12411 }
12412}
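
/*
 * Illustrative sketch (not part of IEM, compiled out): the 16-bit push that
 * IEM_MC_PUSH_U16 above boils down to - pre-decrement SP, then store the
 * value little-endian at the new top of stack.  pbStack is a stand-in for
 * SS-relative guest memory access.
 */
#if 0
static void iemExamplePushU16(uint16_t *puSp, uint8_t *pbStack, uint16_t u16Value)
{
    *puSp -= 2;
    pbStack[*puSp]     = (uint8_t)u16Value;
    pbStack[*puSp + 1] = (uint8_t)(u16Value >> 8);
}
#endif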
12413
12414
12415/** Opcode 0xff. */
12416FNIEMOP_DEF(iemOp_Grp5)
12417{
12418 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
12419 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
12420 {
12421 case 0:
12422 IEMOP_MNEMONIC("inc Ev");
12423 return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_inc);
12424 case 1:
12425 IEMOP_MNEMONIC("dec Ev");
12426 return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_dec);
12427 case 2:
12428 return FNIEMOP_CALL_1(iemOp_Grp5_calln_Ev, bRm);
12429 case 3:
12430 return FNIEMOP_CALL_1(iemOp_Grp5_callf_Ep, bRm);
12431 case 4:
12432 return FNIEMOP_CALL_1(iemOp_Grp5_jmpn_Ev, bRm);
12433 case 5:
12434 return FNIEMOP_CALL_1(iemOp_Grp5_jmpf_Ep, bRm);
12435 case 6:
12436 return FNIEMOP_CALL_1(iemOp_Grp5_push_Ev, bRm);
12437 case 7:
12438 IEMOP_MNEMONIC("grp5-ud");
12439 return IEMOP_RAISE_INVALID_OPCODE();
12440 }
12441 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
12442}
12443
12444
12445
12446const PFNIEMOP g_apfnOneByteMap[256] =
12447{
12448 /* 0x00 */ iemOp_add_Eb_Gb, iemOp_add_Ev_Gv, iemOp_add_Gb_Eb, iemOp_add_Gv_Ev,
12449 /* 0x04 */ iemOp_add_Al_Ib, iemOp_add_eAX_Iz, iemOp_push_ES, iemOp_pop_ES,
12450 /* 0x08 */ iemOp_or_Eb_Gb, iemOp_or_Ev_Gv, iemOp_or_Gb_Eb, iemOp_or_Gv_Ev,
12451 /* 0x0c */ iemOp_or_Al_Ib, iemOp_or_eAX_Iz, iemOp_push_CS, iemOp_2byteEscape,
12452 /* 0x10 */ iemOp_adc_Eb_Gb, iemOp_adc_Ev_Gv, iemOp_adc_Gb_Eb, iemOp_adc_Gv_Ev,
12453 /* 0x14 */ iemOp_adc_Al_Ib, iemOp_adc_eAX_Iz, iemOp_push_SS, iemOp_pop_SS,
12454 /* 0x18 */ iemOp_sbb_Eb_Gb, iemOp_sbb_Ev_Gv, iemOp_sbb_Gb_Eb, iemOp_sbb_Gv_Ev,
12455 /* 0x1c */ iemOp_sbb_Al_Ib, iemOp_sbb_eAX_Iz, iemOp_push_DS, iemOp_pop_DS,
12456 /* 0x20 */ iemOp_and_Eb_Gb, iemOp_and_Ev_Gv, iemOp_and_Gb_Eb, iemOp_and_Gv_Ev,
12457 /* 0x24 */ iemOp_and_Al_Ib, iemOp_and_eAX_Iz, iemOp_seg_ES, iemOp_daa,
12458 /* 0x28 */ iemOp_sub_Eb_Gb, iemOp_sub_Ev_Gv, iemOp_sub_Gb_Eb, iemOp_sub_Gv_Ev,
12459 /* 0x2c */ iemOp_sub_Al_Ib, iemOp_sub_eAX_Iz, iemOp_seg_CS, iemOp_das,
12460 /* 0x30 */ iemOp_xor_Eb_Gb, iemOp_xor_Ev_Gv, iemOp_xor_Gb_Eb, iemOp_xor_Gv_Ev,
12461 /* 0x34 */ iemOp_xor_Al_Ib, iemOp_xor_eAX_Iz, iemOp_seg_SS, iemOp_aaa,
12462 /* 0x38 */ iemOp_cmp_Eb_Gb, iemOp_cmp_Ev_Gv, iemOp_cmp_Gb_Eb, iemOp_cmp_Gv_Ev,
12463 /* 0x3c */ iemOp_cmp_Al_Ib, iemOp_cmp_eAX_Iz, iemOp_seg_DS, iemOp_aas,
12464 /* 0x40 */ iemOp_inc_eAX, iemOp_inc_eCX, iemOp_inc_eDX, iemOp_inc_eBX,
12465 /* 0x44 */ iemOp_inc_eSP, iemOp_inc_eBP, iemOp_inc_eSI, iemOp_inc_eDI,
12466 /* 0x48 */ iemOp_dec_eAX, iemOp_dec_eCX, iemOp_dec_eDX, iemOp_dec_eBX,
12467 /* 0x4c */ iemOp_dec_eSP, iemOp_dec_eBP, iemOp_dec_eSI, iemOp_dec_eDI,
12468 /* 0x50 */ iemOp_push_eAX, iemOp_push_eCX, iemOp_push_eDX, iemOp_push_eBX,
12469 /* 0x54 */ iemOp_push_eSP, iemOp_push_eBP, iemOp_push_eSI, iemOp_push_eDI,
12470 /* 0x58 */ iemOp_pop_eAX, iemOp_pop_eCX, iemOp_pop_eDX, iemOp_pop_eBX,
12471 /* 0x5c */ iemOp_pop_eSP, iemOp_pop_eBP, iemOp_pop_eSI, iemOp_pop_eDI,
12472 /* 0x60 */ iemOp_pusha, iemOp_popa, iemOp_bound_Gv_Ma, iemOp_arpl_Ew_Gw,
12473 /* 0x64 */ iemOp_seg_FS, iemOp_seg_GS, iemOp_op_size, iemOp_addr_size,
12474 /* 0x68 */ iemOp_push_Iz, iemOp_imul_Gv_Ev_Iz, iemOp_push_Ib, iemOp_imul_Gv_Ev_Ib,
12475 /* 0x6c */ iemOp_insb_Yb_DX, iemOp_inswd_Yv_DX, iemOp_outsb_Yb_DX, iemOp_outswd_Yv_DX,
12476 /* 0x70 */ iemOp_jo_Jb, iemOp_jno_Jb, iemOp_jc_Jb, iemOp_jnc_Jb,
12477 /* 0x74 */ iemOp_je_Jb, iemOp_jne_Jb, iemOp_jbe_Jb, iemOp_jnbe_Jb,
12478 /* 0x78 */ iemOp_js_Jb, iemOp_jns_Jb, iemOp_jp_Jb, iemOp_jnp_Jb,
12479 /* 0x7c */ iemOp_jl_Jb, iemOp_jnl_Jb, iemOp_jle_Jb, iemOp_jnle_Jb,
12480 /* 0x80 */ iemOp_Grp1_Eb_Ib_80, iemOp_Grp1_Ev_Iz, iemOp_Grp1_Eb_Ib_82, iemOp_Grp1_Ev_Ib,
12481 /* 0x84 */ iemOp_test_Eb_Gb, iemOp_test_Ev_Gv, iemOp_xchg_Eb_Gb, iemOp_xchg_Ev_Gv,
12482 /* 0x88 */ iemOp_mov_Eb_Gb, iemOp_mov_Ev_Gv, iemOp_mov_Gb_Eb, iemOp_mov_Gv_Ev,
12483 /* 0x8c */ iemOp_mov_Ev_Sw, iemOp_lea_Gv_M, iemOp_mov_Sw_Ev, iemOp_pop_Ev,
12484 /* 0x90 */ iemOp_nop, iemOp_xchg_eCX_eAX, iemOp_xchg_eDX_eAX, iemOp_xchg_eBX_eAX,
12485 /* 0x94 */ iemOp_xchg_eSP_eAX, iemOp_xchg_eBP_eAX, iemOp_xchg_eSI_eAX, iemOp_xchg_eDI_eAX,
12486 /* 0x98 */ iemOp_cbw, iemOp_cwd, iemOp_call_Ap, iemOp_wait,
12487 /* 0x9c */ iemOp_pushf_Fv, iemOp_popf_Fv, iemOp_sahf, iemOp_lahf,
12488 /* 0xa0 */ iemOp_mov_Al_Ob, iemOp_mov_rAX_Ov, iemOp_mov_Ob_AL, iemOp_mov_Ov_rAX,
12489 /* 0xa4 */ iemOp_movsb_Xb_Yb, iemOp_movswd_Xv_Yv, iemOp_cmpsb_Xb_Yb, iemOp_cmpswd_Xv_Yv,
12490 /* 0xa8 */ iemOp_test_AL_Ib, iemOp_test_eAX_Iz, iemOp_stosb_Yb_AL, iemOp_stoswd_Yv_eAX,
12491 /* 0xac */ iemOp_lodsb_AL_Xb, iemOp_lodswd_eAX_Xv, iemOp_scasb_AL_Xb, iemOp_scaswd_eAX_Xv,
12492 /* 0xb0 */ iemOp_mov_AL_Ib, iemOp_CL_Ib, iemOp_DL_Ib, iemOp_BL_Ib,
12493 /* 0xb4 */ iemOp_mov_AH_Ib, iemOp_CH_Ib, iemOp_DH_Ib, iemOp_BH_Ib,
12494 /* 0xb8 */ iemOp_eAX_Iv, iemOp_eCX_Iv, iemOp_eDX_Iv, iemOp_eBX_Iv,
12495 /* 0xbc */ iemOp_eSP_Iv, iemOp_eBP_Iv, iemOp_eSI_Iv, iemOp_eDI_Iv,
12496 /* 0xc0 */ iemOp_Grp2_Eb_Ib, iemOp_Grp2_Ev_Ib, iemOp_retn_Iw, iemOp_retn,
12497 /* 0xc4 */ iemOp_les_Gv_Mp, iemOp_lds_Gv_Mp, iemOp_Grp11_Eb_Ib, iemOp_Grp11_Ev_Iz,
12498 /* 0xc8 */ iemOp_enter_Iw_Ib, iemOp_leave, iemOp_retf_Iw, iemOp_retf,
12499 /* 0xcc */ iemOp_int_3, iemOp_int_Ib, iemOp_into, iemOp_iret,
12500 /* 0xd0 */ iemOp_Grp2_Eb_1, iemOp_Grp2_Ev_1, iemOp_Grp2_Eb_CL, iemOp_Grp2_Ev_CL,
12501 /* 0xd4 */ iemOp_aam_Ib, iemOp_aad_Ib, iemOp_Invalid, iemOp_xlat,
12502 /* 0xd8 */ iemOp_EscF0, iemOp_EscF1, iemOp_EscF2, iemOp_EscF3,
12503 /* 0xdc */ iemOp_EscF4, iemOp_EscF5, iemOp_EscF6, iemOp_EscF7,
12504 /* 0xe0 */ iemOp_loopne_Jb, iemOp_loope_Jb, iemOp_loop_Jb, iemOp_jecxz_Jb,
12505 /* 0xe4 */ iemOp_in_AL_Ib, iemOp_in_eAX_Ib, iemOp_out_Ib_AL, iemOp_out_Ib_eAX,
12506 /* 0xe8 */ iemOp_call_Jv, iemOp_jmp_Jv, iemOp_jmp_Ap, iemOp_jmp_Jb,
12507    /* 0xec */  iemOp_in_AL_DX,         iemOp_in_eAX_DX,        iemOp_out_DX_AL,        iemOp_out_DX_eAX,
12508 /* 0xf0 */ iemOp_lock, iemOp_Invalid, iemOp_repne, iemOp_repe, /** @todo 0xf1 is INT1 / ICEBP. */
12509 /* 0xf4 */ iemOp_hlt, iemOp_cmc, iemOp_Grp3_Eb, iemOp_Grp3_Ev,
12510 /* 0xf8 */ iemOp_clc, iemOp_stc, iemOp_cli, iemOp_sti,
12511 /* 0xfc */ iemOp_cld, iemOp_std, iemOp_Grp4, iemOp_Grp5,
12512};
12513
12514
12515/** @} */
12516