VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h @ 37008

Last change on this file since 37008 was 37008, checked in by vboxsync, 14 years ago:

IEM: More checks and another build fix attempt.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 383.7 KB
/* $Id: IEMAllInstructions.cpp.h 37008 2011-05-09 08:51:42Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation.
 */

/*
 * Copyright (C) 2011 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */

/**
 * Common worker for instructions like ADD, AND, OR, ++ with a byte
 * memory/register as the destination.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_r8, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         * Note! We're putting the eflags on the stack here so we can commit them
         *       after the memory.
         */
        uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R; /* CMP,TEST */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,  pu8Dst,           0);
        IEM_MC_ARG(uint8_t,    u8Src,            1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_EFLAGS(EFlags);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
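
/*
 * A minimal standalone sketch (not part of the decoder proper) of how the
 * workers here carve the mod/reg/rm fields out of a ModR/M byte; the
 * X86_MODRM_* masks and shifts are assumed to match the standard 2:3:3 layout.
 */
#if 0 /* illustrative only */
static inline void iemExampleDecodeModRm(uint8_t bRm)
{
    unsigned const iMod = (bRm >> 6) & 3; /* 3 = register operand, 0..2 = memory */
    unsigned const iReg = (bRm >> 3) & 7; /* REX.R (pIemCpu->uRexReg) supplies bit 3 */
    unsigned const iRm  =  bRm       & 7; /* REX.B (pIemCpu->uRexB) supplies bit 3 */
    (void)iMod; (void)iReg; (void)iRm;
}
#endif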


/**
 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with
 * memory/register as the destination.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t,   u16Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t,   u32Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t,   u64Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're accessing memory.
         * Note! We're putting the eflags on the stack here so we can commit them
         *       after the memory.
         */
        uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R /* CMP,TEST */;
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst,          0);
                IEM_MC_ARG(uint16_t,   u16Src,           1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst,          0);
                IEM_MC_ARG(uint32_t,   u32Src,           1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst,          0);
                IEM_MC_ARG(uint64_t,   u64Src,           1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
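
/*
 * To make the per-width dispatch above concrete: the PCIEMOPBINSIZES argument
 * is assumed to bundle one worker per operand size plus locked variants (NULL
 * for read-only ops like CMP and TEST, which is why the workers test
 * pfnLockedU8 when choosing fAccess).  A sketch of that shape, with clearly
 * hypothetical typedef names:
 */
#if 0 /* illustrative only */
typedef void (*PFNEXAMPLEBINU16)(uint16_t *pu16Dst, uint16_t u16Src, uint32_t *pEFlags);
typedef struct EXAMPLEOPBINSIZES
{
    PFNEXAMPLEBINU16 pfnNormalU16, pfnLockedU16; /* ...and likewise for U8/U32/U64. */
} EXAMPLEOPBINSIZES;
#endif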


/**
 * Common worker for byte instructions like ADD, AND, OR, ++ with a register as
 * the destination.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_r8_rm, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_FETCH_GREG_U8(u8Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         */
        IEM_MC_BEGIN(3, 1);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_MEM_U8(u8Src, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/**
 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with a
 * register as the destination.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t,   u16Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U16(u16Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t,   u32Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U32(u32Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t,   u64Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U64(u64Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're accessing memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t,   u16Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t,   u32Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t,   u64Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Common worker for instructions like ADD, AND, OR, ++ working on AL with
 * a byte immediate.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_AL_Ib, PCIEMOPBINSIZES, pImpl)
{
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG(uint8_t *,     pu8Dst,            0);
    IEM_MC_ARG_CONST(uint8_t, u8Src,/*=*/ u8Imm, 1);
    IEM_MC_ARG(uint32_t *,    pEFlags,           2);

    IEM_MC_REF_GREG_U8(pu8Dst, X86_GREG_xAX);
    IEM_MC_REF_EFLAGS(pEFlags);
    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}


/**
 * Common worker for instructions like ADD, AND, OR, ++ working on
 * AX/EAX/RAX with a word/dword immediate.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rAX_Iz, PCIEMOPBINSIZES, pImpl)
{
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint16_t *,     pu16Dst,             0);
            IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm, 1);
            IEM_MC_ARG(uint32_t *,     pEFlags,             2);

            IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint32_t *,     pu32Dst,             0);
            IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm, 1);
            IEM_MC_ARG(uint32_t *,     pEFlags,             2);

            IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint64_t *,     pu64Dst,             0);
            IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm, 1);
            IEM_MC_ARG(uint32_t *,     pEFlags,             2);

            IEM_MC_REF_GREG_U64(pu64Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
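
/*
 * Note the 64-bit case above: there is no 64-bit immediate form, so a 32-bit
 * immediate is fetched and sign-extended to 64 bits
 * (IEM_OPCODE_GET_NEXT_S32_SX_U64).  A standalone sketch of that widening:
 */
#if 0 /* illustrative only */
static inline uint64_t iemExampleSignExtend32To64(uint32_t u32Imm)
{
    return (uint64_t)(int64_t)(int32_t)u32Imm; /* 0x80000000 -> UINT64_C(0xffffffff80000000) */
}
#endif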


/** Opcodes 0xf1, 0xd6. */
FNIEMOP_DEF(iemOp_Invalid)
{
    IEMOP_MNEMONIC("Invalid");
    return IEMOP_RAISE_INVALID_OPCODE();
}



/** @name ..... opcodes.
 *
 * @{
 */

/** @} */


/** @name Two byte opcodes (first byte 0x0f).
 *
 * @{
 */

/** Opcode 0x0f 0x00 /0. */
FNIEMOP_STUB_1(iemOp_Grp6_sldt, uint8_t, bRm);


/** Opcode 0x0f 0x00 /1. */
FNIEMOP_STUB_1(iemOp_Grp6_str, uint8_t, bRm);


/** Opcode 0x0f 0x00 /2. */
FNIEMOP_DEF_1(iemOp_Grp6_lldt, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
        IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x00 /3. */
FNIEMOP_DEF_1(iemOp_Grp6_ltr, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
        IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x00 /4. */
FNIEMOP_STUB_1(iemOp_Grp6_verr, uint8_t, bRm);


/** Opcode 0x0f 0x00 /5. */
FNIEMOP_STUB_1(iemOp_Grp6_verw, uint8_t, bRm);


/** Opcode 0x0f 0x00. */
FNIEMOP_DEF(iemOp_Grp6)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: return FNIEMOP_CALL_1(iemOp_Grp6_sldt, bRm);
        case 1: return FNIEMOP_CALL_1(iemOp_Grp6_str, bRm);
        case 2: return FNIEMOP_CALL_1(iemOp_Grp6_lldt, bRm);
        case 3: return FNIEMOP_CALL_1(iemOp_Grp6_ltr, bRm);
        case 4: return FNIEMOP_CALL_1(iemOp_Grp6_verr, bRm);
        case 5: return FNIEMOP_CALL_1(iemOp_Grp6_verw, bRm);
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        case 7: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
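
/*
 * Sketch (not part of the decoder) of the "/digit" convention used by the
 * group opcodes here: the ModR/M reg field doubles as a 3-bit opcode
 * extension, which is what the dispatchers switch on.
 */
#if 0 /* illustrative only */
static inline unsigned iemExampleGroupDigit(uint8_t bRm)
{
    /* Same value as (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK above. */
    return (bRm >> 3) & 7;
}
#endif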


/** Opcode 0x0f 0x01 /0. */
FNIEMOP_DEF_1(iemOp_Grp7_sgdt, uint8_t, bRm)
{
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /0. */
FNIEMOP_DEF(iemOp_Grp7_vmcall)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /0. */
FNIEMOP_DEF(iemOp_Grp7_vmlaunch)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /0. */
FNIEMOP_DEF(iemOp_Grp7_vmresume)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /0. */
FNIEMOP_DEF(iemOp_Grp7_vmxoff)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /1. */
FNIEMOP_DEF_1(iemOp_Grp7_sidt, uint8_t, bRm)
{
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /1. */
FNIEMOP_DEF(iemOp_Grp7_monitor)
{
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /1. */
FNIEMOP_DEF(iemOp_Grp7_mwait)
{
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /2. */
FNIEMOP_DEF_1(iemOp_Grp7_lgdt, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEMMODE enmEffOpSize = pIemCpu->enmCpuMode == IEMMODE_64BIT
                         ? IEMMODE_64BIT
                         : pIemCpu->enmEffOpSize;
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0);
    IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/enmEffOpSize, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
    IEM_MC_CALL_CIMPL_3(iemCImpl_lgdt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x01 /2. */
FNIEMOP_DEF(iemOp_Grp7_xgetbv)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /2. */
FNIEMOP_DEF(iemOp_Grp7_xsetbv)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /3. */
FNIEMOP_DEF_1(iemOp_Grp7_lidt, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEMMODE enmEffOpSize = pIemCpu->enmCpuMode == IEMMODE_64BIT
                         ? IEMMODE_64BIT
                         : pIemCpu->enmEffOpSize;
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0);
    IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/enmEffOpSize, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
    IEM_MC_CALL_CIMPL_3(iemCImpl_lidt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x01 /4. */
FNIEMOP_DEF_1(iemOp_Grp7_smsw, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Tmp);
                IEM_MC_FETCH_CR0_U16(u16Tmp);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Tmp);
                IEM_MC_FETCH_CR0_U32(u32Tmp);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_FETCH_CR0_U64(u64Tmp);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Ignore operand size here, memory refs are always 16-bit. */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Tmp);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_CR0_U16(u16Tmp);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tmp);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
        return VINF_SUCCESS;
    }
}


/** Opcode 0x0f 0x01 /6. */
FNIEMOP_DEF_1(iemOp_Grp7_lmsw, uint8_t, bRm)
{
    /* The operand size is effectively ignored, all is 16-bit and only the
       lower four bits (PE, MP, EM and TS) are used. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Tmp, 0);
        IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Tmp, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x01 /7. */
FNIEMOP_DEF_1(iemOp_Grp7_invlpg, uint8_t, bRm)
{
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /7. */
FNIEMOP_DEF(iemOp_Grp7_swapgs)
{
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /7. */
FNIEMOP_DEF(iemOp_Grp7_rdtscp)
{
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01. */
FNIEMOP_DEF(iemOp_Grp7)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_sgdt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 1: return FNIEMOP_CALL(iemOp_Grp7_vmcall);
                case 2: return FNIEMOP_CALL(iemOp_Grp7_vmlaunch);
                case 3: return FNIEMOP_CALL(iemOp_Grp7_vmresume);
                case 4: return FNIEMOP_CALL(iemOp_Grp7_vmxoff);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 1:
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_sidt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_monitor);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_mwait);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 2:
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_lgdt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_xgetbv);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_xsetbv);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 3:
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_lidt, bRm);
            return IEMOP_RAISE_INVALID_OPCODE();

        case 4:
            return FNIEMOP_CALL_1(iemOp_Grp7_smsw, bRm);

        case 5:
            return IEMOP_RAISE_INVALID_OPCODE();

        case 6:
            return FNIEMOP_CALL_1(iemOp_Grp7_lmsw, bRm);

        case 7:
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_invlpg, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_swapgs);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_rdtscp);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}


/** Opcode 0x0f 0x02. */
FNIEMOP_STUB(iemOp_lar_Gv_Ew);
/** Opcode 0x0f 0x03. */
FNIEMOP_STUB(iemOp_lsl_Gv_Ew);
/** Opcode 0x0f 0x05. */
FNIEMOP_STUB(iemOp_syscall);


/** Opcode 0x0f 0x06. */
FNIEMOP_DEF(iemOp_clts)
{
    IEMOP_MNEMONIC("clts");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clts);
}


/** Opcode 0x0f 0x07. */
FNIEMOP_STUB(iemOp_sysret);
/** Opcode 0x0f 0x08. */
FNIEMOP_STUB(iemOp_invd);
/** Opcode 0x0f 0x09. */
FNIEMOP_STUB(iemOp_wbinvd);
/** Opcode 0x0f 0x0b. */
FNIEMOP_STUB(iemOp_ud2);
/** Opcode 0x0f 0x0d. */
FNIEMOP_STUB(iemOp_nop_Ev_prefetch);
/** Opcode 0x0f 0x0e. */
FNIEMOP_STUB(iemOp_femms);
/** Opcode 0x0f 0x0f. */
FNIEMOP_STUB(iemOp_3Dnow);
/** Opcode 0x0f 0x10. */
FNIEMOP_STUB(iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd);
/** Opcode 0x0f 0x11. */
FNIEMOP_STUB(iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd);
/** Opcode 0x0f 0x12. */
FNIEMOP_STUB(iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq);
/** Opcode 0x0f 0x13. */
FNIEMOP_STUB(iemOp_movlps_Mq_Vq__movlpd_Mq_Vq);
/** Opcode 0x0f 0x14. */
FNIEMOP_STUB(iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq);
/** Opcode 0x0f 0x15. */
FNIEMOP_STUB(iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq);
/** Opcode 0x0f 0x16. */
FNIEMOP_STUB(iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq);
/** Opcode 0x0f 0x17. */
FNIEMOP_STUB(iemOp_movhps_Mq_Vq__movhpd_Mq_Vq);
/** Opcode 0x0f 0x18. */
FNIEMOP_STUB(iemOp_prefetch_Grp16);


/** Opcode 0x0f 0x20. */
FNIEMOP_DEF(iemOp_mov_Rd_Cd)
{
    /* mod is ignored, as are operand size overrides. */
    IEMOP_MNEMONIC("mov Rd,Cd");
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
    else
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;

    /** @todo Verify that the invalid lock sequence exception (\#UD) is raised
     *        before the privilege level violation (\#GP). */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
    {
        /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
        if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
            return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        iCrReg |= 8;
    }
    switch (iCrReg)
    {
        case 0: case 2: case 3: case 4: case 8:
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }

    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Cd, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB, iCrReg);
}


/** Opcode 0x0f 0x21. */
FNIEMOP_DEF(iemOp_mov_Rd_Dd)
{
    IEMOP_MNEMONIC("mov Rd,Dd");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
        return IEMOP_RAISE_INVALID_OPCODE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Dd,
                                   (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB,
                                   ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK));
}


/** Opcode 0x0f 0x22. */
FNIEMOP_DEF(iemOp_mov_Cd_Rd)
{
    /* mod is ignored, as are operand size overrides. */
    IEMOP_MNEMONIC("mov Cd,Rd");
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
    else
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;

    /** @todo Verify that the invalid lock sequence exception (\#UD) is raised
     *        before the privilege level violation (\#GP). */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
    {
        /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
        if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
            return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        iCrReg |= 8;
    }
    switch (iCrReg)
    {
        case 0: case 2: case 3: case 4: case 8:
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }

    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Cd_Rd, iCrReg, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}


/** Opcode 0x0f 0x23. */
FNIEMOP_DEF(iemOp_mov_Dd_Rd)
{
    IEMOP_MNEMONIC("mov Dd,Rd");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
        return IEMOP_RAISE_INVALID_OPCODE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Dd_Rd,
                                   ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK),
                                   (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}


/** Opcode 0x0f 0x24. */
FNIEMOP_DEF(iemOp_mov_Rd_Td)
{
    IEMOP_MNEMONIC("mov Rd,Td");
/** @todo Is the invalid opcode raised before parsing any R/M byte? */
    return IEMOP_RAISE_INVALID_OPCODE();
}



/** Opcode 0x0f 0x26. */
FNIEMOP_DEF(iemOp_mov_Td_Rd)
{
    IEMOP_MNEMONIC("mov Td,Rd");
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x28. */
FNIEMOP_STUB(iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd);
/** Opcode 0x0f 0x29. */
FNIEMOP_STUB(iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd);
/** Opcode 0x0f 0x2a. */
FNIEMOP_STUB(iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey);
/** Opcode 0x0f 0x2b. */
FNIEMOP_STUB(iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd);
/** Opcode 0x0f 0x2c. */
FNIEMOP_STUB(iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd);
/** Opcode 0x0f 0x2d. */
FNIEMOP_STUB(iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd);
/** Opcode 0x0f 0x2e. */
FNIEMOP_STUB(iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd);
/** Opcode 0x0f 0x2f. */
FNIEMOP_STUB(iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd);
/** Opcode 0x0f 0x30. */
FNIEMOP_STUB(iemOp_wrmsr);


/** Opcode 0x0f 0x31. */
FNIEMOP_DEF(iemOp_rdtsc)
{
    IEMOP_MNEMONIC("rdtsc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdtsc);
}


/** Opcode 0x0f 0x32. */
FNIEMOP_STUB(iemOp_rdmsr);
/** Opcode 0x0f 0x33. */
FNIEMOP_STUB(iemOp_rdpmc);
/** Opcode 0x0f 0x34. */
FNIEMOP_STUB(iemOp_sysenter);
/** Opcode 0x0f 0x35. */
FNIEMOP_STUB(iemOp_sysexit);
/** Opcode 0x0f 0x37. */
FNIEMOP_STUB(iemOp_getsec);
/** Opcode 0x0f 0x38. */
FNIEMOP_STUB(iemOp_3byte_Esc_A4);
/** Opcode 0x0f 0x3a. */
FNIEMOP_STUB(iemOp_3byte_Esc_A5);
/** Opcode 0x0f 0x3c (?). */
FNIEMOP_STUB(iemOp_movnti_Gv_Ev);

/**
 * Implements a conditional move.
 *
 * Wish there was an obvious way to do this where we could share and reduce
 * code bloat.
 *
 * @param   a_Cnd       The conditional "microcode" operation.
 */
#define CMOV_X(a_Cnd) \
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); \
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) \
    { \
        switch (pIemCpu->enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint16_t, u16Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
\
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint32_t, u32Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
                } IEM_MC_ELSE() { \
                    IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
\
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint64_t, u64Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
\
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } \
    else \
    { \
        switch (pIemCpu->enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint16_t, u16Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
                IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
\
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint32_t, u32Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
                IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
                } IEM_MC_ELSE() { \
                    IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
\
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint64_t, u64Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
\
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } do {} while (0)
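
/*
 * Worth making explicit about the macro above: with a 32-bit operand in
 * 64-bit mode the destination's high dword is cleared even when the condition
 * is false (the IEM_MC_CLEAR_HIGH_GREG_U64 in the ELSE branches), matching
 * the general rule that 32-bit GPR writes zero-extend.  Sketch of the
 * register-form semantics:
 */
#if 0 /* illustrative only */
static inline uint64_t iemExampleCmov32(uint64_t uDst, uint32_t uSrc, bool fCond)
{
    return fCond ? (uint64_t)uSrc : (uint64_t)(uint32_t)uDst; /* high dword always zero */
}
#endif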


/** Opcode 0x0f 0x40. */
FNIEMOP_DEF(iemOp_cmovo_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovo Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF));
}


/** Opcode 0x0f 0x41. */
FNIEMOP_DEF(iemOp_cmovno_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovno Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_OF));
}


/** Opcode 0x0f 0x42. */
FNIEMOP_DEF(iemOp_cmovc_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovc Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF));
}


/** Opcode 0x0f 0x43. */
FNIEMOP_DEF(iemOp_cmovnc_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnc Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_CF));
}


/** Opcode 0x0f 0x44. */
FNIEMOP_DEF(iemOp_cmove_Gv_Ev)
{
    IEMOP_MNEMONIC("cmove Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF));
}


/** Opcode 0x0f 0x45. */
FNIEMOP_DEF(iemOp_cmovne_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovne Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF));
}


/** Opcode 0x0f 0x46. */
FNIEMOP_DEF(iemOp_cmovbe_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovbe Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF));
}


/** Opcode 0x0f 0x47. */
FNIEMOP_DEF(iemOp_cmovnbe_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnbe Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_NO_BITS_SET(X86_EFL_CF | X86_EFL_ZF));
}


/** Opcode 0x0f 0x48. */
FNIEMOP_DEF(iemOp_cmovs_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovs Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF));
}


/** Opcode 0x0f 0x49. */
FNIEMOP_DEF(iemOp_cmovns_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovns Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_SF));
}


/** Opcode 0x0f 0x4a. */
FNIEMOP_DEF(iemOp_cmovp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovp Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF));
}


/** Opcode 0x0f 0x4b. */
FNIEMOP_DEF(iemOp_cmovnp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnp Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_PF));
}


/** Opcode 0x0f 0x4c. */
FNIEMOP_DEF(iemOp_cmovl_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovl Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4d. */
FNIEMOP_DEF(iemOp_cmovnl_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnl Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BITS_EQ(X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4e. */
FNIEMOP_DEF(iemOp_cmovle_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovle Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4f. */
FNIEMOP_DEF(iemOp_cmovnle_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnle Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF));
}

#undef CMOV_X

/** Opcode 0x0f 0x50. */
FNIEMOP_STUB(iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd);
/** Opcode 0x0f 0x51. */
FNIEMOP_STUB(iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd);
/** Opcode 0x0f 0x52. */
FNIEMOP_STUB(iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss);
/** Opcode 0x0f 0x53. */
FNIEMOP_STUB(iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss);
/** Opcode 0x0f 0x54. */
FNIEMOP_STUB(iemOp_andps_Vps_Wps__andpd_Wpd_Vpd);
/** Opcode 0x0f 0x55. */
FNIEMOP_STUB(iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd);
/** Opcode 0x0f 0x56. */
FNIEMOP_STUB(iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd);
/** Opcode 0x0f 0x57. */
FNIEMOP_STUB(iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd);
/** Opcode 0x0f 0x58. */
FNIEMOP_STUB(iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd);
/** Opcode 0x0f 0x59. */
FNIEMOP_STUB(iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd);
/** Opcode 0x0f 0x5a. */
FNIEMOP_STUB(iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd);
/** Opcode 0x0f 0x5b. */
FNIEMOP_STUB(iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps);
/** Opcode 0x0f 0x5c. */
FNIEMOP_STUB(iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd);
/** Opcode 0x0f 0x5d. */
FNIEMOP_STUB(iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd);
/** Opcode 0x0f 0x5e. */
FNIEMOP_STUB(iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd);
/** Opcode 0x0f 0x5f. */
FNIEMOP_STUB(iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd);
/** Opcode 0x0f 0x60. */
FNIEMOP_STUB(iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq);
/** Opcode 0x0f 0x61. */
FNIEMOP_STUB(iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq);
/** Opcode 0x0f 0x62. */
FNIEMOP_STUB(iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq);
/** Opcode 0x0f 0x63. */
FNIEMOP_STUB(iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq);
/** Opcode 0x0f 0x64. */
FNIEMOP_STUB(iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq);
/** Opcode 0x0f 0x65. */
FNIEMOP_STUB(iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq);
/** Opcode 0x0f 0x66. */
FNIEMOP_STUB(iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq);
/** Opcode 0x0f 0x67. */
FNIEMOP_STUB(iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq);
/** Opcode 0x0f 0x68. */
FNIEMOP_STUB(iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq);
/** Opcode 0x0f 0x69. */
FNIEMOP_STUB(iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq);
/** Opcode 0x0f 0x6a. */
FNIEMOP_STUB(iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq);
/** Opcode 0x0f 0x6b. */
FNIEMOP_STUB(iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq);
/** Opcode 0x0f 0x6c. */
FNIEMOP_STUB(iemOp_punpcklqdq_Vdq_Wdq);
/** Opcode 0x0f 0x6d. */
FNIEMOP_STUB(iemOp_punpckhqdq_Vdq_Wdq);
/** Opcode 0x0f 0x6e. */
FNIEMOP_STUB(iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey);
/** Opcode 0x0f 0x6f. */
FNIEMOP_STUB(iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq);
/** Opcode 0x0f 0x70. */
FNIEMOP_STUB(iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib);
/** Opcode 0x0f 0x71. */
FNIEMOP_STUB(iemOp_Grp12);
/** Opcode 0x0f 0x72. */
FNIEMOP_STUB(iemOp_Grp13);
/** Opcode 0x0f 0x73. */
FNIEMOP_STUB(iemOp_Grp14);
/** Opcode 0x0f 0x74. */
FNIEMOP_STUB(iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq);
/** Opcode 0x0f 0x75. */
FNIEMOP_STUB(iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq);
/** Opcode 0x0f 0x76. */
FNIEMOP_STUB(iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq);
/** Opcode 0x0f 0x77. */
FNIEMOP_STUB(iemOp_emms);
/** Opcode 0x0f 0x78. */
FNIEMOP_STUB(iemOp_vmread);
/** Opcode 0x0f 0x79. */
FNIEMOP_STUB(iemOp_vmwrite);
/** Opcode 0x0f 0x7c. */
FNIEMOP_STUB(iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps);
/** Opcode 0x0f 0x7d. */
FNIEMOP_STUB(iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps);
/** Opcode 0x0f 0x7e. */
FNIEMOP_STUB(iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq);
/** Opcode 0x0f 0x7f. */
FNIEMOP_STUB(iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq);


/** Opcode 0x0f 0x80. */
FNIEMOP_DEF(iemOp_jo_Jv)
{
    IEMOP_MNEMONIC("jo Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
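
/*
 * All the Jcc forms below follow the pattern above: Jv is a signed
 * displacement relative to the end of the instruction, and in 64-bit mode
 * the operand size defaults to 64-bit (IEMOP_HLP_DEFAULT_64BIT_OP_SIZE), so
 * the rel32 path is taken outside 16-bit code.  Sketch of the target
 * calculation the IEM_MC_REL_JMP_S16/S32 steps are assumed to perform:
 */
#if 0 /* illustrative only */
static inline uint64_t iemExampleJccTarget(uint64_t uRipNext, int32_t i32Disp)
{
    return uRipNext + (uint64_t)(int64_t)i32Disp; /* uRipNext: RIP after the Jcc */
}
#endif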


/** Opcode 0x0f 0x81. */
FNIEMOP_DEF(iemOp_jno_Jv)
{
    IEMOP_MNEMONIC("jno Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x82. */
FNIEMOP_DEF(iemOp_jc_Jv)
{
    IEMOP_MNEMONIC("jc/jb/jnae Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x83. */
FNIEMOP_DEF(iemOp_jnc_Jv)
{
    IEMOP_MNEMONIC("jnc/jnb/jae Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x84. */
FNIEMOP_DEF(iemOp_je_Jv)
{
    IEMOP_MNEMONIC("je/jz Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x85. */
FNIEMOP_DEF(iemOp_jne_Jv)
{
    IEMOP_MNEMONIC("jne/jnz Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x86. */
FNIEMOP_DEF(iemOp_jbe_Jv)
{
    IEMOP_MNEMONIC("jbe/jna Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x87. */
FNIEMOP_DEF(iemOp_jnbe_Jv)
{
    IEMOP_MNEMONIC("jnbe/ja Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x88. */
FNIEMOP_DEF(iemOp_js_Jv)
{
    IEMOP_MNEMONIC("js Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x89. */
FNIEMOP_DEF(iemOp_jns_Jv)
{
    IEMOP_MNEMONIC("jns Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x8a. */
FNIEMOP_DEF(iemOp_jp_Jv)
{
    IEMOP_MNEMONIC("jp Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x8b. */
FNIEMOP_DEF(iemOp_jnp_Jv)
{
    IEMOP_MNEMONIC("jnp Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x8c. */
FNIEMOP_DEF(iemOp_jl_Jv)
{
    IEMOP_MNEMONIC("jl/jnge Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x8d. */
FNIEMOP_DEF(iemOp_jnl_Jv)
{
    IEMOP_MNEMONIC("jnl/jge Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x8e. */
FNIEMOP_DEF(iemOp_jle_Jv)
{
    IEMOP_MNEMONIC("jle/jng Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x8f. */
FNIEMOP_DEF(iemOp_jnle_Jv)
{
    IEMOP_MNEMONIC("jnle/jg Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2000
2001
2002/** Opcode 0x0f 0x90. */
2003FNIEMOP_DEF(iemOp_seto_Eb)
2004{
2005 IEMOP_MNEMONIC("seto Eb");
2006 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2007 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2008
2009 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2010 * any way. AMD says it's "unused", whatever that means. We're
2011 * ignoring it for now. */
2012 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2013 {
2014 /* register target */
2015 IEM_MC_BEGIN(0, 0);
2016 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
2017 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2018 } IEM_MC_ELSE() {
2019 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2020 } IEM_MC_ENDIF();
2021 IEM_MC_ADVANCE_RIP();
2022 IEM_MC_END();
2023 }
2024 else
2025 {
2026 /* memory target */
2027 IEM_MC_BEGIN(0, 1);
2028 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2029 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2030 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
2031 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2032 } IEM_MC_ELSE() {
2033 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2034 } IEM_MC_ENDIF();
2035 IEM_MC_ADVANCE_RIP();
2036 IEM_MC_END();
2037 }
2038 return VINF_SUCCESS;
2039}
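
/*
 * Note: The remaining SETcc decoders below are identical except for the
 * EFLAGS condition tested. The net effect is simply (illustrative C):
 *
 *     uint8_t bResult = (fEFlags & X86_EFL_OF) ? 1 : 0;
 *
 * stored to the r/m8 register or memory byte; the destination is always
 * written whether or not the condition holds.
 */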
2040
2041
2042/** Opcode 0x0f 0x91. */
2043FNIEMOP_DEF(iemOp_setno_Eb)
2044{
2045 IEMOP_MNEMONIC("setno Eb");
2046 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2047 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2048
2049 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2050 * any way. AMD says it's "unused", whatever that means. We're
2051 * ignoring it for now. */
2052 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2053 {
2054 /* register target */
2055 IEM_MC_BEGIN(0, 0);
2056 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
2057 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2058 } IEM_MC_ELSE() {
2059 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2060 } IEM_MC_ENDIF();
2061 IEM_MC_ADVANCE_RIP();
2062 IEM_MC_END();
2063 }
2064 else
2065 {
2066 /* memory target */
2067 IEM_MC_BEGIN(0, 1);
2068 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2069 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2070 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
2071 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2072 } IEM_MC_ELSE() {
2073 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2074 } IEM_MC_ENDIF();
2075 IEM_MC_ADVANCE_RIP();
2076 IEM_MC_END();
2077 }
2078 return VINF_SUCCESS;
2079}
2080
2081
2082/** Opcode 0x0f 0x92. */
2083FNIEMOP_DEF(iemOp_setc_Eb)
2084{
2085 IEMOP_MNEMONIC("setc Eb");
2086 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2087 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2088
2089 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2090 * any way. AMD says it's "unused", whatever that means. We're
2091 * ignoring it for now. */
2092 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2093 {
2094 /* register target */
2095 IEM_MC_BEGIN(0, 0);
2096 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
2097 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2098 } IEM_MC_ELSE() {
2099 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2100 } IEM_MC_ENDIF();
2101 IEM_MC_ADVANCE_RIP();
2102 IEM_MC_END();
2103 }
2104 else
2105 {
2106 /* memory target */
2107 IEM_MC_BEGIN(0, 1);
2108 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2109 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2110 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
2111 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2112 } IEM_MC_ELSE() {
2113 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2114 } IEM_MC_ENDIF();
2115 IEM_MC_ADVANCE_RIP();
2116 IEM_MC_END();
2117 }
2118 return VINF_SUCCESS;
2119}
2120
2121
2122/** Opcode 0x0f 0x93. */
2123FNIEMOP_DEF(iemOp_setnc_Eb)
2124{
2125 IEMOP_MNEMONIC("setnc Eb");
2126 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2127 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2128
2129 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2130 * any way. AMD says it's "unused", whatever that means. We're
2131 * ignoring it for now. */
2132 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2133 {
2134 /* register target */
2135 IEM_MC_BEGIN(0, 0);
2136 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
2137 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2138 } IEM_MC_ELSE() {
2139 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2140 } IEM_MC_ENDIF();
2141 IEM_MC_ADVANCE_RIP();
2142 IEM_MC_END();
2143 }
2144 else
2145 {
2146 /* memory target */
2147 IEM_MC_BEGIN(0, 1);
2148 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2149 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2150 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
2151 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2152 } IEM_MC_ELSE() {
2153 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2154 } IEM_MC_ENDIF();
2155 IEM_MC_ADVANCE_RIP();
2156 IEM_MC_END();
2157 }
2158 return VINF_SUCCESS;
2159}
2160
2161
2162/** Opcode 0x0f 0x94. */
2163FNIEMOP_DEF(iemOp_sete_Eb)
2164{
2165 IEMOP_MNEMONIC("sete Eb");
2166 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2167 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2168
2169 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2170 * any way. AMD says it's "unused", whatever that means. We're
2171 * ignoring it for now. */
2172 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2173 {
2174 /* register target */
2175 IEM_MC_BEGIN(0, 0);
2176 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
2177 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2178 } IEM_MC_ELSE() {
2179 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2180 } IEM_MC_ENDIF();
2181 IEM_MC_ADVANCE_RIP();
2182 IEM_MC_END();
2183 }
2184 else
2185 {
2186 /* memory target */
2187 IEM_MC_BEGIN(0, 1);
2188 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2189 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2190 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
2191 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2192 } IEM_MC_ELSE() {
2193 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2194 } IEM_MC_ENDIF();
2195 IEM_MC_ADVANCE_RIP();
2196 IEM_MC_END();
2197 }
2198 return VINF_SUCCESS;
2199}
2200
2201
2202/** Opcode 0x0f 0x95. */
2203FNIEMOP_DEF(iemOp_setne_Eb)
2204{
2205 IEMOP_MNEMONIC("setne Eb");
2206 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2207 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2208
2209 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2210 * any way. AMD says it's "unused", whatever that means. We're
2211 * ignoring it for now. */
2212 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2213 {
2214 /* register target */
2215 IEM_MC_BEGIN(0, 0);
2216 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
2217 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2218 } IEM_MC_ELSE() {
2219 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2220 } IEM_MC_ENDIF();
2221 IEM_MC_ADVANCE_RIP();
2222 IEM_MC_END();
2223 }
2224 else
2225 {
2226 /* memory target */
2227 IEM_MC_BEGIN(0, 1);
2228 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2229 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2230 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
2231 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2232 } IEM_MC_ELSE() {
2233 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2234 } IEM_MC_ENDIF();
2235 IEM_MC_ADVANCE_RIP();
2236 IEM_MC_END();
2237 }
2238 return VINF_SUCCESS;
2239}
2240
2241
2242/** Opcode 0x0f 0x96. */
2243FNIEMOP_DEF(iemOp_setbe_Eb)
2244{
2245 IEMOP_MNEMONIC("setbe Eb");
2246 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2247 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2248
2249 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2250 * any way. AMD says it's "unused", whatever that means. We're
2251 * ignoring it for now. */
2252 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2253 {
2254 /* register target */
2255 IEM_MC_BEGIN(0, 0);
2256 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
2257 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2258 } IEM_MC_ELSE() {
2259 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2260 } IEM_MC_ENDIF();
2261 IEM_MC_ADVANCE_RIP();
2262 IEM_MC_END();
2263 }
2264 else
2265 {
2266 /* memory target */
2267 IEM_MC_BEGIN(0, 1);
2268 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2269 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2270 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
2271 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2272 } IEM_MC_ELSE() {
2273 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2274 } IEM_MC_ENDIF();
2275 IEM_MC_ADVANCE_RIP();
2276 IEM_MC_END();
2277 }
2278 return VINF_SUCCESS;
2279}
2280
2281
2282/** Opcode 0x0f 0x97. */
2283FNIEMOP_DEF(iemOp_setnbe_Eb)
2284{
2285 IEMOP_MNEMONIC("setnbe Eb");
2286 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2287 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2288
2289 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2290 * any way. AMD says it's "unused", whatever that means. We're
2291 * ignoring it for now. */
2292 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2293 {
2294 /* register target */
2295 IEM_MC_BEGIN(0, 0);
2296 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
2297 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2298 } IEM_MC_ELSE() {
2299 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2300 } IEM_MC_ENDIF();
2301 IEM_MC_ADVANCE_RIP();
2302 IEM_MC_END();
2303 }
2304 else
2305 {
2306 /* memory target */
2307 IEM_MC_BEGIN(0, 1);
2308 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2309 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2310 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
2311 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2312 } IEM_MC_ELSE() {
2313 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2314 } IEM_MC_ENDIF();
2315 IEM_MC_ADVANCE_RIP();
2316 IEM_MC_END();
2317 }
2318 return VINF_SUCCESS;
2319}
2320
2321
2322/** Opcode 0x0f 0x98. */
2323FNIEMOP_DEF(iemOp_sets_Eb)
2324{
2325 IEMOP_MNEMONIC("sets Eb");
2326 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2327 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2328
2329 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2330 * any way. AMD says it's "unused", whatever that means. We're
2331 * ignoring it for now. */
2332 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2333 {
2334 /* register target */
2335 IEM_MC_BEGIN(0, 0);
2336 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
2337 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2338 } IEM_MC_ELSE() {
2339 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2340 } IEM_MC_ENDIF();
2341 IEM_MC_ADVANCE_RIP();
2342 IEM_MC_END();
2343 }
2344 else
2345 {
2346 /* memory target */
2347 IEM_MC_BEGIN(0, 1);
2348 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2349 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2350 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
2351 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2352 } IEM_MC_ELSE() {
2353 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2354 } IEM_MC_ENDIF();
2355 IEM_MC_ADVANCE_RIP();
2356 IEM_MC_END();
2357 }
2358 return VINF_SUCCESS;
2359}
2360
2361
2362/** Opcode 0x0f 0x99. */
2363FNIEMOP_DEF(iemOp_setns_Eb)
2364{
2365 IEMOP_MNEMONIC("setns Eb");
2366 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2367 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2368
2369 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2370 * any way. AMD says it's "unused", whatever that means. We're
2371 * ignoring it for now. */
2372 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2373 {
2374 /* register target */
2375 IEM_MC_BEGIN(0, 0);
2376 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
2377 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2378 } IEM_MC_ELSE() {
2379 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2380 } IEM_MC_ENDIF();
2381 IEM_MC_ADVANCE_RIP();
2382 IEM_MC_END();
2383 }
2384 else
2385 {
2386 /* memory target */
2387 IEM_MC_BEGIN(0, 1);
2388 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2389 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2390 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
2391 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2392 } IEM_MC_ELSE() {
2393 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2394 } IEM_MC_ENDIF();
2395 IEM_MC_ADVANCE_RIP();
2396 IEM_MC_END();
2397 }
2398 return VINF_SUCCESS;
2399}
2400
2401
2402/** Opcode 0x0f 0x9a. */
2403FNIEMOP_DEF(iemOp_setp_Eb)
2404{
2405 IEMOP_MNEMONIC("setnp Eb");
2406 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2407 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2408
2409 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2410 * any way. AMD says it's "unused", whatever that means. We're
2411 * ignoring it for now. */
2412 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2413 {
2414 /* register target */
2415 IEM_MC_BEGIN(0, 0);
2416 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2417 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2418 } IEM_MC_ELSE() {
2419 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2420 } IEM_MC_ENDIF();
2421 IEM_MC_ADVANCE_RIP();
2422 IEM_MC_END();
2423 }
2424 else
2425 {
2426 /* memory target */
2427 IEM_MC_BEGIN(0, 1);
2428 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2429 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2430 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2431 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2432 } IEM_MC_ELSE() {
2433 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2434 } IEM_MC_ENDIF();
2435 IEM_MC_ADVANCE_RIP();
2436 IEM_MC_END();
2437 }
2438 return VINF_SUCCESS;
2439}
2440
2441
2442/** Opcode 0x0f 0x9b. */
2443FNIEMOP_DEF(iemOp_setnp_Eb)
2444{
2445 IEMOP_MNEMONIC("setnp Eb");
2446 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2447 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2448
2449 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2450 * any way. AMD says it's "unused", whatever that means. We're
2451 * ignoring it for now. */
2452 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2453 {
2454 /* register target */
2455 IEM_MC_BEGIN(0, 0);
2456 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2457 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2458 } IEM_MC_ELSE() {
2459 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2460 } IEM_MC_ENDIF();
2461 IEM_MC_ADVANCE_RIP();
2462 IEM_MC_END();
2463 }
2464 else
2465 {
2466 /* memory target */
2467 IEM_MC_BEGIN(0, 1);
2468 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2469 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2470 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2471 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2472 } IEM_MC_ELSE() {
2473 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2474 } IEM_MC_ENDIF();
2475 IEM_MC_ADVANCE_RIP();
2476 IEM_MC_END();
2477 }
2478 return VINF_SUCCESS;
2479}
2480
2481
2482/** Opcode 0x0f 0x9c. */
2483FNIEMOP_DEF(iemOp_setl_Eb)
2484{
2485 IEMOP_MNEMONIC("setl Eb");
2486 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2487 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2488
2489 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2490 * any way. AMD says it's "unused", whatever that means. We're
2491 * ignoring it for now. */
2492 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2493 {
2494 /* register target */
2495 IEM_MC_BEGIN(0, 0);
2496 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
2497 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2498 } IEM_MC_ELSE() {
2499 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2500 } IEM_MC_ENDIF();
2501 IEM_MC_ADVANCE_RIP();
2502 IEM_MC_END();
2503 }
2504 else
2505 {
2506 /* memory target */
2507 IEM_MC_BEGIN(0, 1);
2508 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2509 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2510 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
2511 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2512 } IEM_MC_ELSE() {
2513 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2514 } IEM_MC_ENDIF();
2515 IEM_MC_ADVANCE_RIP();
2516 IEM_MC_END();
2517 }
2518 return VINF_SUCCESS;
2519}
2520
2521
2522/** Opcode 0x0f 0x9d. */
2523FNIEMOP_DEF(iemOp_setnl_Eb)
2524{
2525 IEMOP_MNEMONIC("setnl Eb");
2526 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2527 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2528
2529 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2530 * any way. AMD says it's "unused", whatever that means. We're
2531 * ignoring it for now. */
2532 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2533 {
2534 /* register target */
2535 IEM_MC_BEGIN(0, 0);
2536 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
2537 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2538 } IEM_MC_ELSE() {
2539 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2540 } IEM_MC_ENDIF();
2541 IEM_MC_ADVANCE_RIP();
2542 IEM_MC_END();
2543 }
2544 else
2545 {
2546 /* memory target */
2547 IEM_MC_BEGIN(0, 1);
2548 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2549 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2550 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
2551 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2552 } IEM_MC_ELSE() {
2553 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2554 } IEM_MC_ENDIF();
2555 IEM_MC_ADVANCE_RIP();
2556 IEM_MC_END();
2557 }
2558 return VINF_SUCCESS;
2559}
2560
2561
2562/** Opcode 0x0f 0x9e. */
2563FNIEMOP_DEF(iemOp_setle_Eb)
2564{
2565 IEMOP_MNEMONIC("setle Eb");
2566 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2567 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2568
2569 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2570 * any way. AMD says it's "unused", whatever that means. We're
2571 * ignoring it for now. */
2572 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2573 {
2574 /* register target */
2575 IEM_MC_BEGIN(0, 0);
2576 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
2577 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2578 } IEM_MC_ELSE() {
2579 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2580 } IEM_MC_ENDIF();
2581 IEM_MC_ADVANCE_RIP();
2582 IEM_MC_END();
2583 }
2584 else
2585 {
2586 /* memory target */
2587 IEM_MC_BEGIN(0, 1);
2588 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2589 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2590 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
2591 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2592 } IEM_MC_ELSE() {
2593 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2594 } IEM_MC_ENDIF();
2595 IEM_MC_ADVANCE_RIP();
2596 IEM_MC_END();
2597 }
2598 return VINF_SUCCESS;
2599}
2600
2601
2602/** Opcode 0x0f 0x9f. */
2603FNIEMOP_DEF(iemOp_setnle_Eb)
2604{
2605 IEMOP_MNEMONIC("setnle Eb");
2606 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2607 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2608
2609 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2610 * any way. AMD says it's "unused", whatever that means. We're
2611 * ignoring it for now. */
2612 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2613 {
2614 /* register target */
2615 IEM_MC_BEGIN(0, 0);
2616 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
2617 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2618 } IEM_MC_ELSE() {
2619 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2620 } IEM_MC_ENDIF();
2621 IEM_MC_ADVANCE_RIP();
2622 IEM_MC_END();
2623 }
2624 else
2625 {
2626 /* memory target */
2627 IEM_MC_BEGIN(0, 1);
2628 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2629 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2630 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
2631 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2632 } IEM_MC_ELSE() {
2633 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2634 } IEM_MC_ENDIF();
2635 IEM_MC_ADVANCE_RIP();
2636 IEM_MC_END();
2637 }
2638 return VINF_SUCCESS;
2639}
2640
2641
2642/**
2643 * Common 'push segment-register' helper.
2644 */
2645FNIEMOP_DEF_1(iemOpCommonPushSReg, uint8_t, iReg)
2646{
2647 IEMOP_HLP_NO_LOCK_PREFIX();
2648 if (iReg < X86_SREG_FS)
2649 IEMOP_HLP_NO_64BIT();
2650 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
2651
2652 switch (pIemCpu->enmEffOpSize)
2653 {
2654 case IEMMODE_16BIT:
2655 IEM_MC_BEGIN(0, 1);
2656 IEM_MC_LOCAL(uint16_t, u16Value);
2657 IEM_MC_FETCH_SREG_U16(u16Value, iReg);
2658 IEM_MC_PUSH_U16(u16Value);
2659 IEM_MC_ADVANCE_RIP();
2660 IEM_MC_END();
2661 break;
2662
2663 case IEMMODE_32BIT:
2664 IEM_MC_BEGIN(0, 1);
2665 IEM_MC_LOCAL(uint32_t, u32Value);
2666 IEM_MC_FETCH_SREG_ZX_U32(u32Value, iReg);
2667 IEM_MC_PUSH_U32(u32Value);
2668 IEM_MC_ADVANCE_RIP();
2669 IEM_MC_END();
2670 break;
2671
2672 case IEMMODE_64BIT:
2673 IEM_MC_BEGIN(0, 1);
2674 IEM_MC_LOCAL(uint64_t, u64Value);
2675 IEM_MC_FETCH_SREG_ZX_U64(u64Value, iReg);
2676 IEM_MC_PUSH_U64(u64Value);
2677 IEM_MC_ADVANCE_RIP();
2678 IEM_MC_END();
2679 break;
2680 }
2681
2682 return VINF_SUCCESS;
2683}
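
/*
 * Note: With a 32-bit or 64-bit operand size the 16-bit selector is
 * zero-extended before the push, so 'push fs' in 64-bit mode moves RSP
 * down by 8 and stores the selector in the low word of the slot:
 *
 *     uint64_t u64Value = (uint64_t)uFsSel;  // illustrative; zero-extended
 *
 * Some real CPUs only write the low word of the stack slot; the zero
 * extension here is this emulation's choice.
 */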
2684
2685
2686/** Opcode 0x0f 0xa0. */
2687FNIEMOP_DEF(iemOp_push_fs)
2688{
2689 IEMOP_MNEMONIC("push fs");
2690 IEMOP_HLP_NO_LOCK_PREFIX();
2691 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_FS);
2692}
2693
2694
2695/** Opcode 0x0f 0xa1. */
2696FNIEMOP_DEF(iemOp_pop_fs)
2697{
2698 IEMOP_MNEMONIC("pop fs");
2699 IEMOP_HLP_NO_LOCK_PREFIX();
2700 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_FS, pIemCpu->enmEffOpSize);
2701}
2702
2703
2704/** Opcode 0x0f 0xa2. */
2705FNIEMOP_DEF(iemOp_cpuid)
2706{
2707 IEMOP_MNEMONIC("cpuid");
2708 IEMOP_HLP_NO_LOCK_PREFIX();
2709 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cpuid);
2710}
2711
2712
2713/**
2714 * Common worker for iemOp_bt_Ev_Gv, iemOp_btc_Ev_Gv, iemOp_btr_Ev_Gv and
2715 * iemOp_bts_Ev_Gv.
2716 */
2717FNIEMOP_DEF_1(iemOpCommonBit_Ev_Gv, PCIEMOPBINSIZES, pImpl)
2718{
2719 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2720
2721 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
2722
2723 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2724 {
2725 /* register destination. */
2726 IEMOP_HLP_NO_LOCK_PREFIX();
2727 switch (pIemCpu->enmEffOpSize)
2728 {
2729 case IEMMODE_16BIT:
2730 IEM_MC_BEGIN(3, 0);
2731 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
2732 IEM_MC_ARG(uint16_t, u16Src, 1);
2733 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2734
2735 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2736 IEM_MC_AND_LOCAL_U16(u16Src, 0xf);
2737 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2738 IEM_MC_REF_EFLAGS(pEFlags);
2739 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
2740
2741 IEM_MC_ADVANCE_RIP();
2742 IEM_MC_END();
2743 return VINF_SUCCESS;
2744
2745 case IEMMODE_32BIT:
2746 IEM_MC_BEGIN(3, 0);
2747 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
2748 IEM_MC_ARG(uint32_t, u32Src, 1);
2749 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2750
2751 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2752 IEM_MC_AND_LOCAL_U32(u32Src, 0x1f);
2753 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2754 IEM_MC_REF_EFLAGS(pEFlags);
2755 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
2756
2757 IEM_MC_ADVANCE_RIP();
2758 IEM_MC_END();
2759 return VINF_SUCCESS;
2760
2761 case IEMMODE_64BIT:
2762 IEM_MC_BEGIN(3, 0);
2763 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
2764 IEM_MC_ARG(uint64_t, u64Src, 1);
2765 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2766
2767 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2768 IEM_MC_AND_LOCAL_U64(u64Src, 0x3f);
2769 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2770 IEM_MC_REF_EFLAGS(pEFlags);
2771 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
2772
2773 IEM_MC_ADVANCE_RIP();
2774 IEM_MC_END();
2775 return VINF_SUCCESS;
2776
2777 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2778 }
2779 }
2780 else
2781 {
2782 /* memory destination. */
2783
2784 uint32_t fAccess;
2785 if (pImpl->pfnLockedU16)
2786 fAccess = IEM_ACCESS_DATA_RW;
2787 else /* BT */
2788 {
2789 IEMOP_HLP_NO_LOCK_PREFIX();
2790 fAccess = IEM_ACCESS_DATA_R;
2791 }
2792
2793 /** @todo test negative bit offsets! */
2794 switch (pIemCpu->enmEffOpSize)
2795 {
2796 case IEMMODE_16BIT:
2797 IEM_MC_BEGIN(3, 2);
2798 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
2799 IEM_MC_ARG(uint16_t, u16Src, 1);
2800 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2801 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2802 IEM_MC_LOCAL(int16_t, i16AddrAdj);
2803
2804 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2805 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2806 IEM_MC_ASSIGN(i16AddrAdj, u16Src);
2807 IEM_MC_AND_ARG_U16(u16Src, 0x0f);
2808 IEM_MC_SAR_LOCAL_S16(i16AddrAdj, 4);
2809 IEM_MC_SHL_LOCAL_S16(i16AddrAdj, 1);
2810 IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(GCPtrEffDst, i16AddrAdj);
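 /* A sketch of the adjustment with illustrative values: u16Src = 35
    gives i16AddrAdj = (35 >> 4) << 1 = +4 bytes and u16Src &= 0x0f
    leaves bit 3, i.e. bit 35 counted from GCPtrEffDst. The 32-bit
    and 64-bit cases below do the same with 5/6-bit shifts. */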
2811 IEM_MC_FETCH_EFLAGS(EFlags);
2812
2813 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2814 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2815 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
2816 else
2817 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
2818 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
2819
2820 IEM_MC_COMMIT_EFLAGS(EFlags);
2821 IEM_MC_ADVANCE_RIP();
2822 IEM_MC_END();
2823 return VINF_SUCCESS;
2824
2825 case IEMMODE_32BIT:
2826 IEM_MC_BEGIN(3, 2);
2827 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
2828 IEM_MC_ARG(uint32_t, u32Src, 1);
2829 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2830 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2831 IEM_MC_LOCAL(int32_t, i32AddrAdj);
2832
2833 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2834 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2835 IEM_MC_ASSIGN(i32AddrAdj, u32Src);
2836 IEM_MC_AND_ARG_U32(u32Src, 0x1f);
2837 IEM_MC_SAR_LOCAL_S32(i32AddrAdj, 5);
2838 IEM_MC_SHL_LOCAL_S32(i32AddrAdj, 2);
2839 IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(GCPtrEffDst, i32AddrAdj);
2840 IEM_MC_FETCH_EFLAGS(EFlags);
2841
2842 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2843 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2844 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
2845 else
2846 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
2847 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
2848
2849 IEM_MC_COMMIT_EFLAGS(EFlags);
2850 IEM_MC_ADVANCE_RIP();
2851 IEM_MC_END();
2852 return VINF_SUCCESS;
2853
2854 case IEMMODE_64BIT:
2855 IEM_MC_BEGIN(3, 2);
2856 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
2857 IEM_MC_ARG(uint64_t, u64Src, 1);
2858 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2859 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2860 IEM_MC_LOCAL(int64_t, i64AddrAdj);
2861
2862 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2863 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2864 IEM_MC_ASSIGN(i64AddrAdj, u64Src);
2865 IEM_MC_AND_ARG_U64(u64Src, 0x3f);
2866 IEM_MC_SAR_LOCAL_S64(i64AddrAdj, 6);
2867 IEM_MC_SHL_LOCAL_S64(i64AddrAdj, 3);
2868 IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(GCPtrEffDst, i64AddrAdj);
2869 IEM_MC_FETCH_EFLAGS(EFlags);
2870
2871 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2872 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2873 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
2874 else
2875 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
2876 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
2877
2878 IEM_MC_COMMIT_EFLAGS(EFlags);
2879 IEM_MC_ADVANCE_RIP();
2880 IEM_MC_END();
2881 return VINF_SUCCESS;
2882
2883 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2884 }
2885 }
2886}
2887
2888
2889/** Opcode 0x0f 0xa3. */
2890FNIEMOP_DEF(iemOp_bt_Ev_Gv)
2891{
2892 IEMOP_MNEMONIC("bt Gv,Mp");
2893 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bt);
2894}
2895
2896
2897/**
2898 * Common worker for iemOp_shrd_Ev_Gv_Ib and iemOp_shld_Ev_Gv_Ib.
2899 */
2900FNIEMOP_DEF_1(iemOpCommonShldShrd_Ib, PCIEMOPSHIFTDBLSIZES, pImpl)
2901{
2902 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2903 IEMOP_HLP_NO_LOCK_PREFIX();
2904 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);
2905
2906 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2907 {
2908 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
2909 IEMOP_HLP_NO_LOCK_PREFIX();
2910
2911 switch (pIemCpu->enmEffOpSize)
2912 {
2913 case IEMMODE_16BIT:
2914 IEM_MC_BEGIN(4, 0);
2915 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
2916 IEM_MC_ARG(uint16_t, u16Src, 1);
2917 IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
2918 IEM_MC_ARG(uint32_t *, pEFlags, 3);
2919
2920 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2921 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2922 IEM_MC_REF_EFLAGS(pEFlags);
2923 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
2924
2925 IEM_MC_ADVANCE_RIP();
2926 IEM_MC_END();
2927 return VINF_SUCCESS;
2928
2929 case IEMMODE_32BIT:
2930 IEM_MC_BEGIN(4, 0);
2931 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
2932 IEM_MC_ARG(uint32_t, u32Src, 1);
2933 IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
2934 IEM_MC_ARG(uint32_t *, pEFlags, 3);
2935
2936 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2937 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2938 IEM_MC_REF_EFLAGS(pEFlags);
2939 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
2940
2941 IEM_MC_ADVANCE_RIP();
2942 IEM_MC_END();
2943 return VINF_SUCCESS;
2944
2945 case IEMMODE_64BIT:
2946 IEM_MC_BEGIN(4, 0);
2947 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
2948 IEM_MC_ARG(uint64_t, u64Src, 1);
2949 IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
2950 IEM_MC_ARG(uint32_t *, pEFlags, 3);
2951
2952 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2953 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2954 IEM_MC_REF_EFLAGS(pEFlags);
2955 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
2956
2957 IEM_MC_ADVANCE_RIP();
2958 IEM_MC_END();
2959 return VINF_SUCCESS;
2960
2961 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2962 }
2963 }
2964 else
2965 {
2966 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2967
2968 switch (pIemCpu->enmEffOpSize)
2969 {
2970 case IEMMODE_16BIT:
2971 IEM_MC_BEGIN(4, 2);
2972 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
2973 IEM_MC_ARG(uint16_t, u16Src, 1);
2974 IEM_MC_ARG(uint8_t, cShiftArg, 2);
2975 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
2976 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2977
2978 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2979 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
2980 IEM_MC_ASSIGN(cShiftArg, cShift);
2981 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2982 IEM_MC_FETCH_EFLAGS(EFlags);
2983 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2984 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
2985
2986 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
2987 IEM_MC_COMMIT_EFLAGS(EFlags);
2988 IEM_MC_ADVANCE_RIP();
2989 IEM_MC_END();
2990 return VINF_SUCCESS;
2991
2992 case IEMMODE_32BIT:
2993 IEM_MC_BEGIN(4, 2);
2994 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
2995 IEM_MC_ARG(uint32_t, u32Src, 1);
2996 IEM_MC_ARG(uint8_t, cShiftArg, 2);
2997 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
2998 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2999
3000 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3001 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
3002 IEM_MC_ASSIGN(cShiftArg, cShift);
3003 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3004 IEM_MC_FETCH_EFLAGS(EFlags);
3005 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3006 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
3007
3008 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
3009 IEM_MC_COMMIT_EFLAGS(EFlags);
3010 IEM_MC_ADVANCE_RIP();
3011 IEM_MC_END();
3012 return VINF_SUCCESS;
3013
3014 case IEMMODE_64BIT:
3015 IEM_MC_BEGIN(4, 2);
3016 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3017 IEM_MC_ARG(uint64_t, u64Src, 1);
3018 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3019 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3020 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3021
3022 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3023 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
3024 IEM_MC_ASSIGN(cShiftArg, cShift);
3025 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3026 IEM_MC_FETCH_EFLAGS(EFlags);
3027 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3028 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
3029
3030 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
3031 IEM_MC_COMMIT_EFLAGS(EFlags);
3032 IEM_MC_ADVANCE_RIP();
3033 IEM_MC_END();
3034 return VINF_SUCCESS;
3035
3036 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3037 }
3038 }
3039}
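
/*
 * Note: The double-precision shifts pull the vacated bits from the second
 * operand instead of zeroes. Illustrative 32-bit semantics for a masked,
 * non-zero count (the real arithmetic lives in g_iemAImpl_shld/shrd):
 *
 *     uDst = (uDst << cShift) | (uSrc >> (32 - cShift));   // shld
 *     uDst = (uDst >> cShift) | (uSrc << (32 - cShift));   // shrd
 *
 * A count of zero leaves both the destination and EFLAGS untouched.
 */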
3040
3041
3042/**
3043 * Common worker for iemOp_shrd_Ev_Gv_CL and iemOp_shld_Ev_Gv_CL.
3044 */
3045FNIEMOP_DEF_1(iemOpCommonShldShrd_CL, PCIEMOPSHIFTDBLSIZES, pImpl)
3046{
3047 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3048 IEMOP_HLP_NO_LOCK_PREFIX();
3049 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);
3050
3051 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3052 {
3053 IEMOP_HLP_NO_LOCK_PREFIX();
3054
3055 switch (pIemCpu->enmEffOpSize)
3056 {
3057 case IEMMODE_16BIT:
3058 IEM_MC_BEGIN(4, 0);
3059 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3060 IEM_MC_ARG(uint16_t, u16Src, 1);
3061 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3062 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3063
3064 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3065 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3066 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3067 IEM_MC_REF_EFLAGS(pEFlags);
3068 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
3069
3070 IEM_MC_ADVANCE_RIP();
3071 IEM_MC_END();
3072 return VINF_SUCCESS;
3073
3074 case IEMMODE_32BIT:
3075 IEM_MC_BEGIN(4, 0);
3076 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3077 IEM_MC_ARG(uint32_t, u32Src, 1);
3078 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3079 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3080
3081 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3082 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3083 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3084 IEM_MC_REF_EFLAGS(pEFlags);
3085 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
3086
3087 IEM_MC_ADVANCE_RIP();
3088 IEM_MC_END();
3089 return VINF_SUCCESS;
3090
3091 case IEMMODE_64BIT:
3092 IEM_MC_BEGIN(4, 0);
3093 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3094 IEM_MC_ARG(uint64_t, u64Src, 1);
3095 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3096 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3097
3098 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3099 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3100 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3101 IEM_MC_REF_EFLAGS(pEFlags);
3102 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
3103
3104 IEM_MC_ADVANCE_RIP();
3105 IEM_MC_END();
3106 return VINF_SUCCESS;
3107
3108 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3109 }
3110 }
3111 else
3112 {
3113 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
3114
3115 switch (pIemCpu->enmEffOpSize)
3116 {
3117 case IEMMODE_16BIT:
3118 IEM_MC_BEGIN(4, 2);
3119 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3120 IEM_MC_ARG(uint16_t, u16Src, 1);
3121 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3122 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3123 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3124
3125 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3126 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3127 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3128 IEM_MC_FETCH_EFLAGS(EFlags);
3129 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3130 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
3131
3132 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
3133 IEM_MC_COMMIT_EFLAGS(EFlags);
3134 IEM_MC_ADVANCE_RIP();
3135 IEM_MC_END();
3136 return VINF_SUCCESS;
3137
3138 case IEMMODE_32BIT:
3139 IEM_MC_BEGIN(4, 2);
3140 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3141 IEM_MC_ARG(uint32_t, u32Src, 1);
3142 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3143 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3144 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3145
3146 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3147 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3148 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3149 IEM_MC_FETCH_EFLAGS(EFlags);
3150 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3151 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
3152
3153 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
3154 IEM_MC_COMMIT_EFLAGS(EFlags);
3155 IEM_MC_ADVANCE_RIP();
3156 IEM_MC_END();
3157 return VINF_SUCCESS;
3158
3159 case IEMMODE_64BIT:
3160 IEM_MC_BEGIN(4, 2);
3161 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3162 IEM_MC_ARG(uint64_t, u64Src, 1);
3163 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3164 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3165 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3166
3167 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3168 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3169 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3170 IEM_MC_FETCH_EFLAGS(EFlags);
3171 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3172 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
3173
3174 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
3175 IEM_MC_COMMIT_EFLAGS(EFlags);
3176 IEM_MC_ADVANCE_RIP();
3177 IEM_MC_END();
3178 return VINF_SUCCESS;
3179
3180 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3181 }
3182 }
3183}
3184
3185
3186
3187/** Opcode 0x0f 0xa4. */
3188FNIEMOP_DEF(iemOp_shld_Ev_Gv_Ib)
3189{
3190 IEMOP_MNEMONIC("shld Ev,Gv,Ib");
3191 return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shld);
3192}
3193
3194
3195/** Opcode 0x0f 0xa5. */
3196FNIEMOP_DEF(iemOp_shld_Ev_Gv_CL)
3197{
3198 IEMOP_MNEMONIC("shld Ev,Gv,CL");
3199 return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shld);
3200}
3201
3202
3203/** Opcode 0x0f 0xa8. */
3204FNIEMOP_DEF(iemOp_push_gs)
3205{
3206 IEMOP_MNEMONIC("push gs");
3207 IEMOP_HLP_NO_LOCK_PREFIX();
3208 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_GS);
3209}
3210
3211
3212/** Opcode 0x0f 0xa9. */
3213FNIEMOP_DEF(iemOp_pop_gs)
3214{
3215 IEMOP_MNEMONIC("pop gs");
3216 IEMOP_HLP_NO_LOCK_PREFIX();
3217 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_GS, pIemCpu->enmEffOpSize);
3218}
3219
3220
3221/** Opcode 0x0f 0xaa. */
3222FNIEMOP_STUB(iemOp_rsm);
3223
3224
3225/** Opcode 0x0f 0xab. */
3226FNIEMOP_DEF(iemOp_bts_Ev_Gv)
3227{
3228 IEMOP_MNEMONIC("bts Gv,Mp");
3229 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bts);
3230}
3231
3232
3233/** Opcode 0x0f 0xac. */
3234FNIEMOP_DEF(iemOp_shrd_Ev_Gv_Ib)
3235{
3236 IEMOP_MNEMONIC("shrd Ev,Gv,Ib");
3237 return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shrd);
3238}
3239
3240
3241/** Opcode 0x0f 0xad. */
3242FNIEMOP_DEF(iemOp_shrd_Ev_Gv_CL)
3243{
3244 IEMOP_MNEMONIC("shrd Ev,Gv,CL");
3245 return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shrd);
3246}
3247
3248
3249/** Opcode 0x0f 0xae. */
3250FNIEMOP_STUB(iemOp_Grp15);
3251
3252
3253/** Opcode 0x0f 0xaf. */
3254FNIEMOP_DEF(iemOp_imul_Gv_Ev)
3255{
3256 IEMOP_MNEMONIC("imul Gv,Ev");
3257 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
3258 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_imul_two);
3259}
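
/*
 * Note: This is the two-operand IMUL form: Gv is multiplied by Ev and the
 * truncated signed product stored back, with CF and OF set when the full
 * result did not fit. Illustrative 32-bit C:
 *
 *     int64_t i64Full = (int64_t)(int32_t)uDst * (int32_t)uSrc;
 *     uDst            = (uint32_t)i64Full;
 *     bool fCfOf      = i64Full != (int64_t)(int32_t)i64Full;
 */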
3260
3261
3262/** Opcode 0x0f 0xb0. */
3263FNIEMOP_STUB(iemOp_cmpxchg_Eb_Gb);
3264/** Opcode 0x0f 0xb1. */
3265FNIEMOP_STUB(iemOp_cmpxchg_Ev_Gv);
3266
3267
3268FNIEMOP_DEF_1(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg)
3269{
3270 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3271 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
3272
3273 /* The source cannot be a register. */
3274 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3275 return IEMOP_RAISE_INVALID_OPCODE();
3276 uint8_t const iGReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
3277
3278 switch (pIemCpu->enmEffOpSize)
3279 {
3280 case IEMMODE_16BIT:
3281 IEM_MC_BEGIN(5, 1);
3282 IEM_MC_ARG(uint16_t, uSel, 0);
3283 IEM_MC_ARG(uint16_t, offSeg, 1);
3284 IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
3285 IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
3286 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
3287 IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
3288 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
3289 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEff);
3290 IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 2);
3291 IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
3292 IEM_MC_END();
3293 return VINF_SUCCESS;
3294
3295 case IEMMODE_32BIT:
3296 IEM_MC_BEGIN(5, 1);
3297 IEM_MC_ARG(uint16_t, uSel, 0);
3298 IEM_MC_ARG(uint32_t, offSeg, 1);
3299 IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
3300 IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
3301 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
3302 IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
3303 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
3304 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEff);
3305 IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 4);
3306 IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
3307 IEM_MC_END();
3308 return VINF_SUCCESS;
3309
3310 case IEMMODE_64BIT:
3311 IEM_MC_BEGIN(5, 1);
3312 IEM_MC_ARG(uint16_t, uSel, 0);
3313 IEM_MC_ARG(uint64_t, offSeg, 1);
3314 IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
3315 IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
3316 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
3317 IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
3318 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
3319 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff);
3320 IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 8);
3321 IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
3322 IEM_MC_END();
3323 return VINF_SUCCESS;
3324
3325 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3326 }
3327}
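
/*
 * Note: The far pointer operand is laid out offset-first in memory, which
 * is why the selector is fetched at a displacement of the operand size.
 * Illustrative m16:32 layout at the effective address:
 *
 *     [GCPtrEff+0..3]  32-bit offset   -> the general register
 *     [GCPtrEff+4..5]  16-bit selector -> the segment register
 *
 * iemCImpl_load_SReg_Greg then performs the selector load and its checks.
 */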
3328
3329
3330/** Opcode 0x0f 0xb2. */
3331FNIEMOP_DEF(iemOp_lss_Gv_Mp)
3332{
3333 IEMOP_MNEMONIC("lss Gv,Mp");
3334 return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_SS);
3335}
3336
3337
3338/** Opcode 0x0f 0xb3. */
3339FNIEMOP_DEF(iemOp_btr_Ev_Gv)
3340{
3341 IEMOP_MNEMONIC("btr Gv,Mp");
3342 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btr);
3343}
3344
3345
3346/** Opcode 0x0f 0xb4. */
3347FNIEMOP_DEF(iemOp_lfs_Gv_Mp)
3348{
3349 IEMOP_MNEMONIC("lfs Gv,Mp");
3350 return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_FS);
3351}
3352
3353
3354/** Opcode 0x0f 0xb5. */
3355FNIEMOP_DEF(iemOp_lgs_Gv_Mp)
3356{
3357 IEMOP_MNEMONIC("lgs Gv,Mp");
3358 return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_GS);
3359}
3360
3361
3362/** Opcode 0x0f 0xb6. */
3363FNIEMOP_DEF(iemOp_movzx_Gv_Eb)
3364{
3365 IEMOP_MNEMONIC("movzx Gv,Eb");
3366
3367 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3368 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
3369
3370 /*
3371 * If rm is denoting a register, no more instruction bytes.
3372 */
3373 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3374 {
3375 switch (pIemCpu->enmEffOpSize)
3376 {
3377 case IEMMODE_16BIT:
3378 IEM_MC_BEGIN(0, 1);
3379 IEM_MC_LOCAL(uint16_t, u16Value);
3380 IEM_MC_FETCH_GREG_U8_ZX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3381 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
3382 IEM_MC_ADVANCE_RIP();
3383 IEM_MC_END();
3384 return VINF_SUCCESS;
3385
3386 case IEMMODE_32BIT:
3387 IEM_MC_BEGIN(0, 1);
3388 IEM_MC_LOCAL(uint32_t, u32Value);
3389 IEM_MC_FETCH_GREG_U8_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3390 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3391 IEM_MC_ADVANCE_RIP();
3392 IEM_MC_END();
3393 return VINF_SUCCESS;
3394
3395 case IEMMODE_64BIT:
3396 IEM_MC_BEGIN(0, 1);
3397 IEM_MC_LOCAL(uint64_t, u64Value);
3398 IEM_MC_FETCH_GREG_U8_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3399 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3400 IEM_MC_ADVANCE_RIP();
3401 IEM_MC_END();
3402 return VINF_SUCCESS;
3403
3404 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3405 }
3406 }
3407 else
3408 {
3409 /*
3410 * We're loading a register from memory.
3411 */
3412 switch (pIemCpu->enmEffOpSize)
3413 {
3414 case IEMMODE_16BIT:
3415 IEM_MC_BEGIN(0, 2);
3416 IEM_MC_LOCAL(uint16_t, u16Value);
3417 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3418 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3419 IEM_MC_FETCH_MEM_U8_ZX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
3420 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
3421 IEM_MC_ADVANCE_RIP();
3422 IEM_MC_END();
3423 return VINF_SUCCESS;
3424
3425 case IEMMODE_32BIT:
3426 IEM_MC_BEGIN(0, 2);
3427 IEM_MC_LOCAL(uint32_t, u32Value);
3428 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3429 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3430 IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
3431 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3432 IEM_MC_ADVANCE_RIP();
3433 IEM_MC_END();
3434 return VINF_SUCCESS;
3435
3436 case IEMMODE_64BIT:
3437 IEM_MC_BEGIN(0, 2);
3438 IEM_MC_LOCAL(uint64_t, u64Value);
3439 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3440 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3441 IEM_MC_FETCH_MEM_U8_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
3442 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3443 IEM_MC_ADVANCE_RIP();
3444 IEM_MC_END();
3445 return VINF_SUCCESS;
3446
3447 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3448 }
3449 }
3450}
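
/*
 * Note: The only difference from movsx (0x0f 0xbe/0xbf) is the extension
 * applied to the fetched value, e.g. for a source byte of 0x80:
 *
 *     uint32_t uZx = (uint32_t)(uint8_t)0x80;   // movzx: 0x00000080
 *     uint32_t uSx = (uint32_t)(int8_t)0x80;    // movsx: 0xffffff80
 */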
3451
3452
3453/** Opcode 0x0f 0xb7. */
3454FNIEMOP_DEF(iemOp_movzx_Gv_Ew)
3455{
3456 IEMOP_MNEMONIC("movzx Gv,Ew");
3457
3458 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3459 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
3460
3461 /** @todo Not entirely sure how the operand size prefix is handled here,
3462 * assuming that it will be ignored. Would be nice to have a few
3463 * tests for this. */
3464 /*
3465 * If rm is denoting a register, no more instruction bytes.
3466 */
3467 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3468 {
3469 if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
3470 {
3471 IEM_MC_BEGIN(0, 1);
3472 IEM_MC_LOCAL(uint32_t, u32Value);
3473 IEM_MC_FETCH_GREG_U16_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3474 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3475 IEM_MC_ADVANCE_RIP();
3476 IEM_MC_END();
3477 }
3478 else
3479 {
3480 IEM_MC_BEGIN(0, 1);
3481 IEM_MC_LOCAL(uint64_t, u64Value);
3482 IEM_MC_FETCH_GREG_U16_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3483 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3484 IEM_MC_ADVANCE_RIP();
3485 IEM_MC_END();
3486 }
3487 }
3488 else
3489 {
3490 /*
3491 * We're loading a register from memory.
3492 */
3493 if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
3494 {
3495 IEM_MC_BEGIN(0, 2);
3496 IEM_MC_LOCAL(uint32_t, u32Value);
3497 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3498 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3499 IEM_MC_FETCH_MEM_U16_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
3500 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3501 IEM_MC_ADVANCE_RIP();
3502 IEM_MC_END();
3503 }
3504 else
3505 {
3506 IEM_MC_BEGIN(0, 2);
3507 IEM_MC_LOCAL(uint64_t, u64Value);
3508 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3509 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3510 IEM_MC_FETCH_MEM_U16_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
3511 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3512 IEM_MC_ADVANCE_RIP();
3513 IEM_MC_END();
3514 }
3515 }
3516 return VINF_SUCCESS;
3517}
3518
3519
3520/** Opcode 0x0f 0xb8. */
3521FNIEMOP_STUB(iemOp_popcnt_Gv_Ev_jmpe);
3522/** Opcode 0x0f 0xb9. */
3523FNIEMOP_STUB(iemOp_Grp10);
3524
3525
3526/** Opcode 0x0f 0xba. */
3527FNIEMOP_DEF(iemOp_Grp8)
3528{
3529 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3530 PCIEMOPBINSIZES pImpl;
3531 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
3532 {
3533 case 0: case 1: case 2: case 3:
3534 return IEMOP_RAISE_INVALID_OPCODE();
3535 case 4: pImpl = &g_iemAImpl_bt; IEMOP_MNEMONIC("bt Ev,Ib"); break;
3536 case 5: pImpl = &g_iemAImpl_bts; IEMOP_MNEMONIC("bts Ev,Ib"); break;
3537 case 6: pImpl = &g_iemAImpl_btr; IEMOP_MNEMONIC("btr Ev,Ib"); break;
3538 case 7: pImpl = &g_iemAImpl_btc; IEMOP_MNEMONIC("btc Ev,Ib"); break;
3539 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3540 }
3541
3542 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
3543
3544 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3545 {
3546 /* register destination. */
3547 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3548 IEMOP_HLP_NO_LOCK_PREFIX();
3549
3550 switch (pIemCpu->enmEffOpSize)
3551 {
3552 case IEMMODE_16BIT:
3553 IEM_MC_BEGIN(3, 0);
3554 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3555 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u8Bit & 0x0f, 1);
3556 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3557
3558 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3559 IEM_MC_REF_EFLAGS(pEFlags);
3560 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
3561
3562 IEM_MC_ADVANCE_RIP();
3563 IEM_MC_END();
3564 return VINF_SUCCESS;
3565
3566 case IEMMODE_32BIT:
3567 IEM_MC_BEGIN(3, 0);
3568 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3569 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u8Bit & 0x1f, 1);
3570 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3571
3572 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3573 IEM_MC_REF_EFLAGS(pEFlags);
3574 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
3575
3576 IEM_MC_ADVANCE_RIP();
3577 IEM_MC_END();
3578 return VINF_SUCCESS;
3579
3580 case IEMMODE_64BIT:
3581 IEM_MC_BEGIN(3, 0);
3582 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3583 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u8Bit & 0x3f, 1);
3584 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3585
3586 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3587 IEM_MC_REF_EFLAGS(pEFlags);
3588 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
3589
3590 IEM_MC_ADVANCE_RIP();
3591 IEM_MC_END();
3592 return VINF_SUCCESS;
3593
3594 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3595 }
3596 }
3597 else
3598 {
3599 /* memory destination. */
3600
3601 uint32_t fAccess;
3602 if (pImpl->pfnLockedU16)
3603 fAccess = IEM_ACCESS_DATA_RW;
3604 else /* BT */
3605 {
3606 IEMOP_HLP_NO_LOCK_PREFIX();
3607 fAccess = IEM_ACCESS_DATA_R;
3608 }
3609
3610 /** @todo test negative bit offsets! */
3611 switch (pIemCpu->enmEffOpSize)
3612 {
3613 case IEMMODE_16BIT:
3614 IEM_MC_BEGIN(3, 1);
3615 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3616 IEM_MC_ARG(uint16_t, u16Src, 1);
3617 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3618 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3619
3620 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3621 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3622 IEM_MC_ASSIGN(u16Src, u8Bit & 0x0f);
3623 IEM_MC_FETCH_EFLAGS(EFlags);
3624 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3625 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3626 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
3627 else
3628 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
3629 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
3630
3631 IEM_MC_COMMIT_EFLAGS(EFlags);
3632 IEM_MC_ADVANCE_RIP();
3633 IEM_MC_END();
3634 return VINF_SUCCESS;
3635
3636 case IEMMODE_32BIT:
3637 IEM_MC_BEGIN(3, 1);
3638 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3639 IEM_MC_ARG(uint32_t, u32Src, 1);
3640 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3641 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3642
3643 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3644 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3645 IEM_MC_ASSIGN(u32Src, u8Bit & 0x1f);
3646 IEM_MC_FETCH_EFLAGS(EFlags);
3647 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3648 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3649 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
3650 else
3651 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
3652 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
3653
3654 IEM_MC_COMMIT_EFLAGS(EFlags);
3655 IEM_MC_ADVANCE_RIP();
3656 IEM_MC_END();
3657 return VINF_SUCCESS;
3658
3659 case IEMMODE_64BIT:
3660 IEM_MC_BEGIN(3, 1);
3661 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3662 IEM_MC_ARG(uint64_t, u64Src, 1);
3663 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3664 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3665
3666 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3667 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3668 IEM_MC_ASSIGN(u64Src, u8Bit & 0x3f);
3669 IEM_MC_FETCH_EFLAGS(EFlags);
3670 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3671 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3672 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
3673 else
3674 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
3675 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
3676
3677 IEM_MC_COMMIT_EFLAGS(EFlags);
3678 IEM_MC_ADVANCE_RIP();
3679 IEM_MC_END();
3680 return VINF_SUCCESS;
3681
3682 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3683 }
3684 }
3686}
3687
3688
3689/** Opcode 0x0f 0xbb. */
3690FNIEMOP_DEF(iemOp_btc_Ev_Gv)
3691{
3692 IEMOP_MNEMONIC("btc Ev,Gv");
3693 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btc);
3694}
3695
3696
3697/** Opcode 0x0f 0xbc. */
3698FNIEMOP_DEF(iemOp_bsf_Gv_Ev)
3699{
3700 IEMOP_MNEMONIC("bsf Gv,Ev");
3701 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
3702 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsf);
3703}
3704
3705
3706/** Opcode 0x0f 0xbd. */
3707FNIEMOP_DEF(iemOp_bsr_Gv_Ev)
3708{
3709 IEMOP_MNEMONIC("bsr Gv,Ev");
3710 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
3711 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsr);
3712}
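
/* Note! Both BSF and BSR set ZF=1 and leave the destination undefined (AMD
   documents it as unmodified) when the source operand is zero, which is why
   ZF is the only arithmetic flag missing from the undefined-EFLAGS masks
   above. */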
3713
3714
3715/** Opcode 0x0f 0xbe. */
3716FNIEMOP_DEF(iemOp_movsx_Gv_Eb)
3717{
3718 IEMOP_MNEMONIC("movsx Gv,Eb");
3719
3720 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3721 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
3722
3723 /*
3724 * If rm is denoting a register, no more instruction bytes.
3725 */
3726 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3727 {
3728 switch (pIemCpu->enmEffOpSize)
3729 {
3730 case IEMMODE_16BIT:
3731 IEM_MC_BEGIN(0, 1);
3732 IEM_MC_LOCAL(uint16_t, u16Value);
3733 IEM_MC_FETCH_GREG_U8_SX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3734 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
3735 IEM_MC_ADVANCE_RIP();
3736 IEM_MC_END();
3737 return VINF_SUCCESS;
3738
3739 case IEMMODE_32BIT:
3740 IEM_MC_BEGIN(0, 1);
3741 IEM_MC_LOCAL(uint32_t, u32Value);
3742 IEM_MC_FETCH_GREG_U8_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3743 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3744 IEM_MC_ADVANCE_RIP();
3745 IEM_MC_END();
3746 return VINF_SUCCESS;
3747
3748 case IEMMODE_64BIT:
3749 IEM_MC_BEGIN(0, 1);
3750 IEM_MC_LOCAL(uint64_t, u64Value);
3751 IEM_MC_FETCH_GREG_U8_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3752 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3753 IEM_MC_ADVANCE_RIP();
3754 IEM_MC_END();
3755 return VINF_SUCCESS;
3756
3757 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3758 }
3759 }
3760 else
3761 {
3762 /*
3763 * We're loading a register from memory.
3764 */
3765 switch (pIemCpu->enmEffOpSize)
3766 {
3767 case IEMMODE_16BIT:
3768 IEM_MC_BEGIN(0, 2);
3769 IEM_MC_LOCAL(uint16_t, u16Value);
3770 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3771 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3772 IEM_MC_FETCH_MEM_U8_SX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
3773 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
3774 IEM_MC_ADVANCE_RIP();
3775 IEM_MC_END();
3776 return VINF_SUCCESS;
3777
3778 case IEMMODE_32BIT:
3779 IEM_MC_BEGIN(0, 2);
3780 IEM_MC_LOCAL(uint32_t, u32Value);
3781 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3782 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3783 IEM_MC_FETCH_MEM_U8_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
3784 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3785 IEM_MC_ADVANCE_RIP();
3786 IEM_MC_END();
3787 return VINF_SUCCESS;
3788
3789 case IEMMODE_64BIT:
3790 IEM_MC_BEGIN(0, 2);
3791 IEM_MC_LOCAL(uint64_t, u64Value);
3792 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3793 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3794 IEM_MC_FETCH_MEM_U8_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
3795 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3796 IEM_MC_ADVANCE_RIP();
3797 IEM_MC_END();
3798 return VINF_SUCCESS;
3799
3800 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3801 }
3802 }
3803}
3804
3805
3806/** Opcode 0x0f 0xbf. */
3807FNIEMOP_DEF(iemOp_movsx_Gv_Ew)
3808{
3809 IEMOP_MNEMONIC("movsx Gv,Ew");
3810
3811 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3812 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
3813
3814 /** @todo Not entirely sure how the operand size prefix is handled here,
3815 * assuming that it will be ignored. Would be nice to have a few
3816 * tests for this. */
3817 /*
3818 * If rm is denoting a register, no more instruction bytes.
3819 */
3820 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3821 {
3822 if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
3823 {
3824 IEM_MC_BEGIN(0, 1);
3825 IEM_MC_LOCAL(uint32_t, u32Value);
3826 IEM_MC_FETCH_GREG_U16_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3827 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3828 IEM_MC_ADVANCE_RIP();
3829 IEM_MC_END();
3830 }
3831 else
3832 {
3833 IEM_MC_BEGIN(0, 1);
3834 IEM_MC_LOCAL(uint64_t, u64Value);
3835 IEM_MC_FETCH_GREG_U16_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3836 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3837 IEM_MC_ADVANCE_RIP();
3838 IEM_MC_END();
3839 }
3840 }
3841 else
3842 {
3843 /*
3844 * We're loading a register from memory.
3845 */
3846 if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
3847 {
3848 IEM_MC_BEGIN(0, 2);
3849 IEM_MC_LOCAL(uint32_t, u32Value);
3850 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3851 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3852 IEM_MC_FETCH_MEM_U16_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
3853 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3854 IEM_MC_ADVANCE_RIP();
3855 IEM_MC_END();
3856 }
3857 else
3858 {
3859 IEM_MC_BEGIN(0, 2);
3860 IEM_MC_LOCAL(uint64_t, u64Value);
3861 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3862 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3863 IEM_MC_FETCH_MEM_U16_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
3864 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3865 IEM_MC_ADVANCE_RIP();
3866 IEM_MC_END();
3867 }
3868 }
3869 return VINF_SUCCESS;
3870}
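
/* Note! If the operand size prefix is honoured rather than ignored (see the
   @todo above), movsx Gw,Ew would degenerate into a plain 16-bit register
   move; the code currently always widens to 32 bits outside 64-bit mode. */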
3871
3872
3873/** Opcode 0x0f 0xc0. */
3874FNIEMOP_STUB(iemOp_xadd_Eb_Gb);
3875/** Opcode 0x0f 0xc1. */
3876FNIEMOP_STUB(iemOp_xadd_Ev_Gv);
3877/** Opcode 0x0f 0xc2. */
3878FNIEMOP_STUB(iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib);
3879/** Opcode 0x0f 0xc3. */
3880FNIEMOP_STUB(iemOp_movnti_My_Gy);
3881/** Opcode 0x0f 0xc4. */
3882FNIEMOP_STUB(iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib);
3883/** Opcode 0x0f 0xc5. */
3884FNIEMOP_STUB(iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib);
3885/** Opcode 0x0f 0xc6. */
3886FNIEMOP_STUB(iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib);
3887/** Opcode 0x0f 0xc7. */
3888FNIEMOP_STUB(iemOp_Grp9);
3889/** Opcode 0x0f 0xc8. */
3890FNIEMOP_STUB(iemOp_bswap_rAX_r8);
3891/** Opcode 0x0f 0xc9. */
3892FNIEMOP_STUB(iemOp_bswap_rCX_r9);
3893/** Opcode 0x0f 0xca. */
3894FNIEMOP_STUB(iemOp_bswap_rDX_r10);
3895/** Opcode 0x0f 0xcb. */
3896FNIEMOP_STUB(iemOp_bswap_rBX_r11);
3897/** Opcode 0x0f 0xcc. */
3898FNIEMOP_STUB(iemOp_bswap_rSP_r12);
3899/** Opcode 0x0f 0xcd. */
3900FNIEMOP_STUB(iemOp_bswap_rBP_r13);
3901/** Opcode 0x0f 0xce. */
3902FNIEMOP_STUB(iemOp_bswap_rSI_r14);
3903/** Opcode 0x0f 0xcf. */
3904FNIEMOP_STUB(iemOp_bswap_rDI_r15);
3905/** Opcode 0x0f 0xd0. */
3906FNIEMOP_STUB(iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps);
3907/** Opcode 0x0f 0xd1. */
3908FNIEMOP_STUB(iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq);
3909/** Opcode 0x0f 0xd2. */
3910FNIEMOP_STUB(iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq);
3911/** Opcode 0x0f 0xd3. */
3912FNIEMOP_STUB(iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq);
3913/** Opcode 0x0f 0xd4. */
3914FNIEMOP_STUB(iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq);
3915/** Opcode 0x0f 0xd5. */
3916FNIEMOP_STUB(iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq);
3917/** Opcode 0x0f 0xd6. */
3918FNIEMOP_STUB(iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq);
3919/** Opcode 0x0f 0xd7. */
3920FNIEMOP_STUB(iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq);
3921/** Opcode 0x0f 0xd8. */
3922FNIEMOP_STUB(iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq);
3923/** Opcode 0x0f 0xd9. */
3924FNIEMOP_STUB(iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq);
3925/** Opcode 0x0f 0xda. */
3926FNIEMOP_STUB(iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq);
3927/** Opcode 0x0f 0xdb. */
3928FNIEMOP_STUB(iemOp_pand_Pq_Qq__pand_Vdq_Wdq);
3929/** Opcode 0x0f 0xdc. */
3930FNIEMOP_STUB(iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq);
3931/** Opcode 0x0f 0xdd. */
3932FNIEMOP_STUB(iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq);
3933/** Opcode 0x0f 0xde. */
3934FNIEMOP_STUB(iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq);
3935/** Opcode 0x0f 0xdf. */
3936FNIEMOP_STUB(iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq);
3937/** Opcode 0x0f 0xe0. */
3938FNIEMOP_STUB(iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq);
3939/** Opcode 0x0f 0xe1. */
3940FNIEMOP_STUB(iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq);
3941/** Opcode 0x0f 0xe2. */
3942FNIEMOP_STUB(iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq);
3943/** Opcode 0x0f 0xe3. */
3944FNIEMOP_STUB(iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq);
3945/** Opcode 0x0f 0xe4. */
3946FNIEMOP_STUB(iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq);
3947/** Opcode 0x0f 0xe5. */
3948FNIEMOP_STUB(iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq);
3949/** Opcode 0x0f 0xe6. */
3950FNIEMOP_STUB(iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd);
3951/** Opcode 0x0f 0xe7. */
3952FNIEMOP_STUB(iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq);
3953/** Opcode 0x0f 0xe8. */
3954FNIEMOP_STUB(iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq);
3955/** Opcode 0x0f 0xe9. */
3956FNIEMOP_STUB(iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq);
3957/** Opcode 0x0f 0xea. */
3958FNIEMOP_STUB(iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq);
3959/** Opcode 0x0f 0xeb. */
3960FNIEMOP_STUB(iemOp_por_Pq_Qq__por_Vdq_Wdq);
3961/** Opcode 0x0f 0xec. */
3962FNIEMOP_STUB(iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq);
3963/** Opcode 0x0f 0xed. */
3964FNIEMOP_STUB(iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq);
3965/** Opcode 0x0f 0xee. */
3966FNIEMOP_STUB(iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq);
3967/** Opcode 0x0f 0xef. */
3968FNIEMOP_STUB(iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq);
3969/** Opcode 0x0f 0xf0. */
3970FNIEMOP_STUB(iemOp_lddqu_Vdq_Mdq);
3971/** Opcode 0x0f 0xf1. */
3972FNIEMOP_STUB(iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq);
3973/** Opcode 0x0f 0xf2. */
3974FNIEMOP_STUB(iemOp_psld_Pq_Qq__pslld_Vdq_Wdq);
3975/** Opcode 0x0f 0xf3. */
3976FNIEMOP_STUB(iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq);
3977/** Opcode 0x0f 0xf4. */
3978FNIEMOP_STUB(iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq);
3979/** Opcode 0x0f 0xf5. */
3980FNIEMOP_STUB(iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq);
3981/** Opcode 0x0f 0xf6. */
3982FNIEMOP_STUB(iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq);
3983/** Opcode 0x0f 0xf7. */
3984FNIEMOP_STUB(iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq);
3985/** Opcode 0x0f 0xf8. */
3986FNIEMOP_STUB(iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq);
3987/** Opcode 0x0f 0xf9. */
3988FNIEMOP_STUB(iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq);
3989/** Opcode 0x0f 0xfa. */
3990FNIEMOP_STUB(iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq);
3991/** Opcode 0x0f 0xfb. */
3992FNIEMOP_STUB(iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq);
3993/** Opcode 0x0f 0xfc. */
3994FNIEMOP_STUB(iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq);
3995/** Opcode 0x0f 0xfd. */
3996FNIEMOP_STUB(iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq);
3997/** Opcode 0x0f 0xfe. */
3998FNIEMOP_STUB(iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq);
3999
4000
4001const PFNIEMOP g_apfnTwoByteMap[256] =
4002{
4003 /* 0x00 */ iemOp_Grp6, iemOp_Grp7, iemOp_lar_Gv_Ew, iemOp_lsl_Gv_Ew,
4004 /* 0x04 */ iemOp_Invalid, iemOp_syscall, iemOp_clts, iemOp_sysret,
4005 /* 0x08 */ iemOp_invd, iemOp_wbinvd, iemOp_Invalid, iemOp_ud2,
4006 /* 0x0c */ iemOp_Invalid, iemOp_nop_Ev_prefetch, iemOp_femms, iemOp_3Dnow,
4007 /* 0x10 */ iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd,
4008 /* 0x11 */ iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd,
4009 /* 0x12 */ iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq,
4010 /* 0x13 */ iemOp_movlps_Mq_Vq__movlpd_Mq_Vq,
4011 /* 0x14 */ iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq,
4012 /* 0x15 */ iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq,
4013 /* 0x16 */ iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq,
4014 /* 0x17 */ iemOp_movhps_Mq_Vq__movhpd_Mq_Vq,
4015 /* 0x18 */ iemOp_prefetch_Grp16, iemOp_Invalid, iemOp_Invalid, iemOp_Invalid,
4016 /* 0x1c */ iemOp_Invalid, iemOp_Invalid, iemOp_Invalid, iemOp_Invalid,
4017 /* 0x20 */ iemOp_mov_Rd_Cd, iemOp_mov_Rd_Dd, iemOp_mov_Cd_Rd, iemOp_mov_Dd_Rd,
4018 /* 0x24 */ iemOp_mov_Rd_Td, iemOp_Invalid, iemOp_mov_Td_Rd, iemOp_Invalid,
4019 /* 0x28 */ iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd,
4020 /* 0x29 */ iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd,
4021 /* 0x2a */ iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey,
4022 /* 0x2b */ iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd,
4023 /* 0x2c */ iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd,
4024 /* 0x2d */ iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd,
4025 /* 0x2e */ iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd,
4026 /* 0x2f */ iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd,
4027 /* 0x30 */ iemOp_wrmsr, iemOp_rdtsc, iemOp_rdmsr, iemOp_rdpmc,
4028 /* 0x34 */ iemOp_sysenter, iemOp_sysexit, iemOp_Invalid, iemOp_getsec,
4029 /* 0x38 */ iemOp_3byte_Esc_A4, iemOp_Invalid, iemOp_3byte_Esc_A5, iemOp_Invalid,
4030 /* 0x3c */ iemOp_movnti_Gv_Ev/*?*/,iemOp_Invalid, iemOp_Invalid, iemOp_Invalid,
4031 /* 0x40 */ iemOp_cmovo_Gv_Ev, iemOp_cmovno_Gv_Ev, iemOp_cmovc_Gv_Ev, iemOp_cmovnc_Gv_Ev,
4032 /* 0x44 */ iemOp_cmove_Gv_Ev, iemOp_cmovne_Gv_Ev, iemOp_cmovbe_Gv_Ev, iemOp_cmovnbe_Gv_Ev,
4033 /* 0x48 */ iemOp_cmovs_Gv_Ev, iemOp_cmovns_Gv_Ev, iemOp_cmovp_Gv_Ev, iemOp_cmovnp_Gv_Ev,
4034 /* 0x4c */ iemOp_cmovl_Gv_Ev, iemOp_cmovnl_Gv_Ev, iemOp_cmovle_Gv_Ev, iemOp_cmovnle_Gv_Ev,
4035 /* 0x50 */ iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd,
4036 /* 0x51 */ iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd,
4037 /* 0x52 */ iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss,
4038 /* 0x53 */ iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss,
4039 /* 0x54 */ iemOp_andps_Vps_Wps__andpd_Wpd_Vpd,
4040 /* 0x55 */ iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd,
4041 /* 0x56 */ iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd,
4042 /* 0x57 */ iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd,
4043 /* 0x58 */ iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd,
4044 /* 0x59 */ iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd,
4045 /* 0x5a */ iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd,
4046 /* 0x5b */ iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps,
4047 /* 0x5c */ iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd,
4048 /* 0x5d */ iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd,
4049 /* 0x5e */ iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd,
4050 /* 0x5f */ iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd,
4051 /* 0x60 */ iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq,
4052 /* 0x61 */ iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq,
4053 /* 0x62 */ iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq,
4054 /* 0x63 */ iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq,
4055 /* 0x64 */ iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq,
4056 /* 0x65 */ iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq,
4057 /* 0x66 */ iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq,
4058 /* 0x67 */ iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq,
4059 /* 0x68 */ iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq,
4060 /* 0x69 */ iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq,
4061 /* 0x6a */ iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq,
4062 /* 0x6b */ iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq,
4063 /* 0x6c */ iemOp_punpcklqdq_Vdq_Wdq,
4064 /* 0x6d */ iemOp_punpckhqdq_Vdq_Wdq,
4065 /* 0x6e */ iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey,
4066 /* 0x6f */ iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq,
4067 /* 0x70 */ iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib,
4068 /* 0x71 */ iemOp_Grp12,
4069 /* 0x72 */ iemOp_Grp13,
4070 /* 0x73 */ iemOp_Grp14,
4071 /* 0x74 */ iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq,
4072 /* 0x75 */ iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq,
4073 /* 0x76 */ iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq,
4074 /* 0x77 */ iemOp_emms,
4075 /* 0x78 */ iemOp_vmread, iemOp_vmwrite, iemOp_Invalid, iemOp_Invalid,
4076 /* 0x7c */ iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps,
4077 /* 0x7d */ iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps,
4078 /* 0x7e */ iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq,
4079 /* 0x7f */ iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq,
4080 /* 0x80 */ iemOp_jo_Jv, iemOp_jno_Jv, iemOp_jc_Jv, iemOp_jnc_Jv,
4081 /* 0x84 */ iemOp_je_Jv, iemOp_jne_Jv, iemOp_jbe_Jv, iemOp_jnbe_Jv,
4082 /* 0x88 */ iemOp_js_Jv, iemOp_jns_Jv, iemOp_jp_Jv, iemOp_jnp_Jv,
4083 /* 0x8c */ iemOp_jl_Jv, iemOp_jnl_Jv, iemOp_jle_Jv, iemOp_jnle_Jv,
4084 /* 0x90 */ iemOp_seto_Eb, iemOp_setno_Eb, iemOp_setc_Eb, iemOp_setnc_Eb,
4085 /* 0x94 */ iemOp_sete_Eb, iemOp_setne_Eb, iemOp_setbe_Eb, iemOp_setnbe_Eb,
4086 /* 0x98 */ iemOp_sets_Eb, iemOp_setns_Eb, iemOp_setp_Eb, iemOp_setnp_Eb,
4087 /* 0x9c */ iemOp_setl_Eb, iemOp_setnl_Eb, iemOp_setle_Eb, iemOp_setnle_Eb,
4088 /* 0xa0 */ iemOp_push_fs, iemOp_pop_fs, iemOp_cpuid, iemOp_bt_Ev_Gv,
4089 /* 0xa4 */ iemOp_shld_Ev_Gv_Ib, iemOp_shld_Ev_Gv_CL, iemOp_Invalid, iemOp_Invalid,
4090 /* 0xa8 */ iemOp_push_gs, iemOp_pop_gs, iemOp_rsm, iemOp_bts_Ev_Gv,
4091 /* 0xac */ iemOp_shrd_Ev_Gv_Ib, iemOp_shrd_Ev_Gv_CL, iemOp_Grp15, iemOp_imul_Gv_Ev,
4092 /* 0xb0 */ iemOp_cmpxchg_Eb_Gb, iemOp_cmpxchg_Ev_Gv, iemOp_lss_Gv_Mp, iemOp_btr_Ev_Gv,
4093 /* 0xb4 */ iemOp_lfs_Gv_Mp, iemOp_lgs_Gv_Mp, iemOp_movzx_Gv_Eb, iemOp_movzx_Gv_Ew,
4094 /* 0xb8 */ iemOp_popcnt_Gv_Ev_jmpe,iemOp_Grp10, iemOp_Grp8, iemOp_btc_Ev_Gv,
4095 /* 0xbc */ iemOp_bsf_Gv_Ev, iemOp_bsr_Gv_Ev, iemOp_movsx_Gv_Eb, iemOp_movsx_Gv_Ew,
4096 /* 0xc0 */ iemOp_xadd_Eb_Gb,
4097 /* 0xc1 */ iemOp_xadd_Ev_Gv,
4098 /* 0xc2 */ iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib,
4099 /* 0xc3 */ iemOp_movnti_My_Gy,
4100 /* 0xc4 */ iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib,
4101 /* 0xc5 */ iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib,
4102 /* 0xc6 */ iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib,
4103 /* 0xc7 */ iemOp_Grp9,
4104 /* 0xc8 */ iemOp_bswap_rAX_r8, iemOp_bswap_rCX_r9, iemOp_bswap_rDX_r10, iemOp_bswap_rBX_r11,
4105 /* 0xcc */ iemOp_bswap_rSP_r12, iemOp_bswap_rBP_r13, iemOp_bswap_rSI_r14, iemOp_bswap_rDI_r15,
4106 /* 0xd0 */ iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps,
4107 /* 0xd1 */ iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq,
4108 /* 0xd2 */ iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq,
4109 /* 0xd3 */ iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq,
4110 /* 0xd4 */ iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq,
4111 /* 0xd5 */ iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq,
4112 /* 0xd6 */ iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq,
4113 /* 0xd7 */ iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq,
4114 /* 0xd8 */ iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq,
4115 /* 0xd9 */ iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq,
4116 /* 0xda */ iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq,
4117 /* 0xdb */ iemOp_pand_Pq_Qq__pand_Vdq_Wdq,
4118 /* 0xdc */ iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq,
4119 /* 0xdd */ iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq,
4120 /* 0xde */ iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq,
4121 /* 0xdf */ iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq,
4122 /* 0xe0 */ iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq,
4123 /* 0xe1 */ iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq,
4124 /* 0xe2 */ iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq,
4125 /* 0xe3 */ iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq,
4126 /* 0xe4 */ iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq,
4127 /* 0xe5 */ iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq,
4128 /* 0xe6 */ iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd,
4129 /* 0xe7 */ iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq,
4130 /* 0xe8 */ iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq,
4131 /* 0xe9 */ iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq,
4132 /* 0xea */ iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq,
4133 /* 0xeb */ iemOp_por_Pq_Qq__por_Vdq_Wdq,
4134 /* 0xec */ iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq,
4135 /* 0xed */ iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq,
4136 /* 0xee */ iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq,
4137 /* 0xef */ iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq,
4138 /* 0xf0 */ iemOp_lddqu_Vdq_Mdq,
4139 /* 0xf1 */ iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq,
4140 /* 0xf2 */ iemOp_psld_Pq_Qq__pslld_Vdq_Wdq,
4141 /* 0xf3 */ iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq,
4142 /* 0xf4 */ iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq,
4143 /* 0xf5 */ iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq,
4144 /* 0xf6 */ iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq,
4145 /* 0xf7 */ iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq,
4146 /* 0xf8 */ iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq,
4147 /* 0xf9 */ iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq,
4148 /* 0xfa */ iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq,
4149 /* 0xfb */ iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq,
4150 /* 0xfc */ iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq,
4151 /* 0xfd */ iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq,
4152 /* 0xfe */ iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq,
4153 /* 0xff */ iemOp_Invalid
4154};
4155
4156/** @} */
4157
4158
4159/** @name One byte opcodes.
4160 *
4161 * @{
4162 */
4163
4164/** Opcode 0x00. */
4165FNIEMOP_DEF(iemOp_add_Eb_Gb)
4166{
4167 IEMOP_MNEMONIC("add Eb,Gb");
4168 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_add);
4169}
4170
4171
4172/** Opcode 0x01. */
4173FNIEMOP_DEF(iemOp_add_Ev_Gv)
4174{
4175 IEMOP_MNEMONIC("add Ev,Gv");
4176 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_add);
4177}
4178
4179
4180/** Opcode 0x02. */
4181FNIEMOP_DEF(iemOp_add_Gb_Eb)
4182{
4183 IEMOP_MNEMONIC("add Gb,Eb");
4184 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_add);
4185}
4186
4187
4188/** Opcode 0x03. */
4189FNIEMOP_DEF(iemOp_add_Gv_Ev)
4190{
4191 IEMOP_MNEMONIC("add Gv,Ev");
4192 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_add);
4193}
4194
4195
4196/** Opcode 0x04. */
4197FNIEMOP_DEF(iemOp_add_Al_Ib)
4198{
4199 IEMOP_MNEMONIC("add al,Ib");
4200 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_add);
4201}
4202
4203
4204/** Opcode 0x05. */
4205FNIEMOP_DEF(iemOp_add_eAX_Iz)
4206{
4207 IEMOP_MNEMONIC("add rAX,Iz");
4208 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_add);
4209}
4210
4211
4212/** Opcode 0x06. */
4213FNIEMOP_DEF(iemOp_push_ES)
4214{
4215 IEMOP_MNEMONIC("push es");
4216 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_ES);
4217}
4218
4219
4220/** Opcode 0x07. */
4221FNIEMOP_DEF(iemOp_pop_ES)
4222{
4223 IEMOP_MNEMONIC("pop es");
4224 IEMOP_HLP_NO_64BIT();
4225 IEMOP_HLP_NO_LOCK_PREFIX();
4226 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_ES, pIemCpu->enmEffOpSize);
4227}
4228
4229
4230/** Opcode 0x08. */
4231FNIEMOP_DEF(iemOp_or_Eb_Gb)
4232{
4233 IEMOP_MNEMONIC("or Eb,Gb");
4234 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4235 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_or);
4236}
4237
4238
4239/** Opcode 0x09. */
4240FNIEMOP_DEF(iemOp_or_Ev_Gv)
4241{
4242 IEMOP_MNEMONIC("or Ev,Gv");
4243 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4244 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_or);
4245}
4246
4247
4248/** Opcode 0x0a. */
4249FNIEMOP_DEF(iemOp_or_Gb_Eb)
4250{
4251 IEMOP_MNEMONIC("or Gb,Eb");
4252 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4253 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_or);
4254}
4255
4256
4257/** Opcode 0x0b. */
4258FNIEMOP_DEF(iemOp_or_Gv_Ev)
4259{
4260 IEMOP_MNEMONIC("or Gv,Ev");
4261 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4262 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_or);
4263}
4264
4265
4266/** Opcode 0x0c. */
4267FNIEMOP_DEF(iemOp_or_Al_Ib)
4268{
4269 IEMOP_MNEMONIC("or al,Ib");
4270 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4271 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_or);
4272}
4273
4274
4275/** Opcode 0x0d. */
4276FNIEMOP_DEF(iemOp_or_eAX_Iz)
4277{
4278 IEMOP_MNEMONIC("or rAX,Iz");
4279 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4280 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_or);
4281}
4282
4283
4284/** Opcode 0x0e. */
4285FNIEMOP_DEF(iemOp_push_CS)
4286{
4287 IEMOP_MNEMONIC("push cs");
4288 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_CS);
4289}
4290
4291
4292/** Opcode 0x0f. */
4293FNIEMOP_DEF(iemOp_2byteEscape)
4294{
4295 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4296 return FNIEMOP_CALL(g_apfnTwoByteMap[b]);
4297}
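
/* Note! The 0x66/0xf2/0xf3 form selection is not done in this dispatcher;
   judging by the combined handler names in g_apfnTwoByteMap (e.g.
   iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd), each
   handler is expected to pick the right variant from pIemCpu->fPrefixes
   itself. */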
4298
4299/** Opcode 0x10. */
4300FNIEMOP_DEF(iemOp_adc_Eb_Gb)
4301{
4302 IEMOP_MNEMONIC("adc Eb,Gb");
4303 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_adc);
4304}
4305
4306
4307/** Opcode 0x11. */
4308FNIEMOP_DEF(iemOp_adc_Ev_Gv)
4309{
4310 IEMOP_MNEMONIC("adc Ev,Gv");
4311 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_adc);
4312}
4313
4314
4315/** Opcode 0x12. */
4316FNIEMOP_DEF(iemOp_adc_Gb_Eb)
4317{
4318 IEMOP_MNEMONIC("adc Gb,Eb");
4319 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_adc);
4320}
4321
4322
4323/** Opcode 0x13. */
4324FNIEMOP_DEF(iemOp_adc_Gv_Ev)
4325{
4326 IEMOP_MNEMONIC("adc Gv,Ev");
4327 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_adc);
4328}
4329
4330
4331/** Opcode 0x14. */
4332FNIEMOP_DEF(iemOp_adc_Al_Ib)
4333{
4334 IEMOP_MNEMONIC("adc al,Ib");
4335 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_adc);
4336}
4337
4338
4339/** Opcode 0x15. */
4340FNIEMOP_DEF(iemOp_adc_eAX_Iz)
4341{
4342 IEMOP_MNEMONIC("adc rAX,Iz");
4343 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_adc);
4344}
4345
4346
4347/** Opcode 0x16. */
4348FNIEMOP_DEF(iemOp_push_SS)
4349{
4350 IEMOP_MNEMONIC("push ss");
4351 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_SS);
4352}
4353
4354
4355/** Opcode 0x17. */
4356FNIEMOP_DEF(iemOp_pop_SS)
4357{
4358 IEMOP_MNEMONIC("pop ss"); /** @todo implies instruction fusing? */
4359 IEMOP_HLP_NO_LOCK_PREFIX();
4360 IEMOP_HLP_NO_64BIT();
4361 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_SS, pIemCpu->enmEffOpSize);
4362}
4363
4364
4365/** Opcode 0x18. */
4366FNIEMOP_DEF(iemOp_sbb_Eb_Gb)
4367{
4368 IEMOP_MNEMONIC("sbb Eb,Gb");
4369 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sbb);
4370}
4371
4372
4373/** Opcode 0x19. */
4374FNIEMOP_DEF(iemOp_sbb_Ev_Gv)
4375{
4376 IEMOP_MNEMONIC("sbb Ev,Gv");
4377 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sbb);
4378}
4379
4380
4381/** Opcode 0x1a. */
4382FNIEMOP_DEF(iemOp_sbb_Gb_Eb)
4383{
4384 IEMOP_MNEMONIC("sbb Gb,Eb");
4385 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sbb);
4386}
4387
4388
4389/** Opcode 0x1b. */
4390FNIEMOP_DEF(iemOp_sbb_Gv_Ev)
4391{
4392 IEMOP_MNEMONIC("sbb Gv,Ev");
4393 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sbb);
4394}
4395
4396
4397/** Opcode 0x1c. */
4398FNIEMOP_DEF(iemOp_sbb_Al_Ib)
4399{
4400 IEMOP_MNEMONIC("sbb al,Ib");
4401 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sbb);
4402}
4403
4404
4405/** Opcode 0x1d. */
4406FNIEMOP_DEF(iemOp_sbb_eAX_Iz)
4407{
4408 IEMOP_MNEMONIC("sbb rAX,Iz");
4409 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sbb);
4410}
4411
4412
4413/** Opcode 0x1e. */
4414FNIEMOP_DEF(iemOp_push_DS)
4415{
4416 IEMOP_MNEMONIC("push ds");
4417 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_DS);
4418}
4419
4420
4421/** Opcode 0x1f. */
4422FNIEMOP_DEF(iemOp_pop_DS)
4423{
4424 IEMOP_MNEMONIC("pop ds");
4425 IEMOP_HLP_NO_LOCK_PREFIX();
4426 IEMOP_HLP_NO_64BIT();
4427 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_DS, pIemCpu->enmEffOpSize);
4428}
4429
4430
4431/** Opcode 0x20. */
4432FNIEMOP_DEF(iemOp_and_Eb_Gb)
4433{
4434 IEMOP_MNEMONIC("and Eb,Gb");
4435 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4436 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_and);
4437}
4438
4439
4440/** Opcode 0x21. */
4441FNIEMOP_DEF(iemOp_and_Ev_Gv)
4442{
4443 IEMOP_MNEMONIC("and Ev,Gv");
4444 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4445 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_and);
4446}
4447
4448
4449/** Opcode 0x22. */
4450FNIEMOP_DEF(iemOp_and_Gb_Eb)
4451{
4452 IEMOP_MNEMONIC("and Gb,Eb");
4453 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4454 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_and);
4455}
4456
4457
4458/** Opcode 0x23. */
4459FNIEMOP_DEF(iemOp_and_Gv_Ev)
4460{
4461 IEMOP_MNEMONIC("and Gv,Ev");
4462 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4463 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_and);
4464}
4465
4466
4467/** Opcode 0x24. */
4468FNIEMOP_DEF(iemOp_and_Al_Ib)
4469{
4470 IEMOP_MNEMONIC("and al,Ib");
4471 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4472 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_and);
4473}
4474
4475
4476/** Opcode 0x25. */
4477FNIEMOP_DEF(iemOp_and_eAX_Iz)
4478{
4479 IEMOP_MNEMONIC("and rAX,Iz");
4480 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4481 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_and);
4482}
4483
4484
4485/** Opcode 0x26. */
4486FNIEMOP_DEF(iemOp_seg_ES)
4487{
4488 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_ES;
4489 pIemCpu->iEffSeg = X86_SREG_ES;
4490
4491 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4492 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4493}
4494
4495
4496/** Opcode 0x27. */
4497FNIEMOP_STUB(iemOp_daa);
4498
4499
4500/** Opcode 0x28. */
4501FNIEMOP_DEF(iemOp_sub_Eb_Gb)
4502{
4503 IEMOP_MNEMONIC("sub Eb,Gb");
4504 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sub);
4505}
4506
4507
4508/** Opcode 0x29. */
4509FNIEMOP_DEF(iemOp_sub_Ev_Gv)
4510{
4511 IEMOP_MNEMONIC("sub Ev,Gv");
4512 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sub);
4513}
4514
4515
4516/** Opcode 0x2a. */
4517FNIEMOP_DEF(iemOp_sub_Gb_Eb)
4518{
4519 IEMOP_MNEMONIC("sub Gb,Eb");
4520 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sub);
4521}
4522
4523
4524/** Opcode 0x2b. */
4525FNIEMOP_DEF(iemOp_sub_Gv_Ev)
4526{
4527 IEMOP_MNEMONIC("sub Gv,Ev");
4528 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sub);
4529}
4530
4531
4532/** Opcode 0x2c. */
4533FNIEMOP_DEF(iemOp_sub_Al_Ib)
4534{
4535 IEMOP_MNEMONIC("sub al,Ib");
4536 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sub);
4537}
4538
4539
4540/** Opcode 0x2d. */
4541FNIEMOP_DEF(iemOp_sub_eAX_Iz)
4542{
4543 IEMOP_MNEMONIC("sub rAX,Iz");
4544 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sub);
4545}
4546
4547
4548/** Opcode 0x2e. */
4549FNIEMOP_DEF(iemOp_seg_CS)
4550{
4551 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_CS;
4552 pIemCpu->iEffSeg = X86_SREG_CS;
4553
4554 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4555 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4556}
4557
4558
4559/** Opcode 0x2f. */
4560FNIEMOP_STUB(iemOp_das);
4561
4562
4563/** Opcode 0x30. */
4564FNIEMOP_DEF(iemOp_xor_Eb_Gb)
4565{
4566 IEMOP_MNEMONIC("xor Eb,Gb");
4567 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4568 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_xor);
4569}
4570
4571
4572/** Opcode 0x31. */
4573FNIEMOP_DEF(iemOp_xor_Ev_Gv)
4574{
4575 IEMOP_MNEMONIC("xor Ev,Gv");
4576 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4577 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_xor);
4578}
4579
4580
4581/** Opcode 0x32. */
4582FNIEMOP_DEF(iemOp_xor_Gb_Eb)
4583{
4584 IEMOP_MNEMONIC("xor Gb,Eb");
4585 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4586 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_xor);
4587}
4588
4589
4590/** Opcode 0x33. */
4591FNIEMOP_DEF(iemOp_xor_Gv_Ev)
4592{
4593 IEMOP_MNEMONIC("xor Gv,Ev");
4594 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4595 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_xor);
4596}
4597
4598
4599/** Opcode 0x34. */
4600FNIEMOP_DEF(iemOp_xor_Al_Ib)
4601{
4602 IEMOP_MNEMONIC("xor al,Ib");
4603 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4604 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_xor);
4605}
4606
4607
4608/** Opcode 0x35. */
4609FNIEMOP_DEF(iemOp_xor_eAX_Iz)
4610{
4611 IEMOP_MNEMONIC("xor rAX,Iz");
4612 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4613 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_xor);
4614}
4615
4616
4617/** Opcode 0x36. */
4618FNIEMOP_DEF(iemOp_seg_SS)
4619{
4620 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_SS;
4621 pIemCpu->iEffSeg = X86_SREG_SS;
4622
4623 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4624 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4625}
4626
4627
4628/** Opcode 0x37. */
4629FNIEMOP_STUB(iemOp_aaa);
4630
4631
4632/** Opcode 0x38. */
4633FNIEMOP_DEF(iemOp_cmp_Eb_Gb)
4634{
4635 IEMOP_MNEMONIC("cmp Eb,Gb");
4636 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
4637 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_cmp);
4638}
4639
4640
4641/** Opcode 0x39. */
4642FNIEMOP_DEF(iemOp_cmp_Ev_Gv)
4643{
4644 IEMOP_MNEMONIC("cmp Ev,Gv");
4645 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
4646 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_cmp);
4647}
4648
4649
4650/** Opcode 0x3a. */
4651FNIEMOP_DEF(iemOp_cmp_Gb_Eb)
4652{
4653 IEMOP_MNEMONIC("cmp Gb,Eb");
4654 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_cmp);
4655}
4656
4657
4658/** Opcode 0x3b. */
4659FNIEMOP_DEF(iemOp_cmp_Gv_Ev)
4660{
4661 IEMOP_MNEMONIC("cmp Gv,Ev");
4662 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_cmp);
4663}
4664
4665
4666/** Opcode 0x3c. */
4667FNIEMOP_DEF(iemOp_cmp_Al_Ib)
4668{
4669 IEMOP_MNEMONIC("cmp al,Ib");
4670 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_cmp);
4671}
4672
4673
4674/** Opcode 0x3d. */
4675FNIEMOP_DEF(iemOp_cmp_eAX_Iz)
4676{
4677 IEMOP_MNEMONIC("cmp rAX,Iz");
4678 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_cmp);
4679}
4680
4681
4682/** Opcode 0x3e. */
4683FNIEMOP_DEF(iemOp_seg_DS)
4684{
4685 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_DS;
4686 pIemCpu->iEffSeg = X86_SREG_DS;
4687
4688 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4689 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4690}
4691
4692
4693/** Opcode 0x3f. */
4694FNIEMOP_STUB(iemOp_aas);
4695
4696/**
4697 * Common 'inc/dec/not/neg register' helper.
4698 */
4699FNIEMOP_DEF_2(iemOpCommonUnaryGReg, PCIEMOPUNARYSIZES, pImpl, uint8_t, iReg)
4700{
4701 IEMOP_HLP_NO_LOCK_PREFIX();
4702 switch (pIemCpu->enmEffOpSize)
4703 {
4704 case IEMMODE_16BIT:
4705 IEM_MC_BEGIN(2, 0);
4706 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4707 IEM_MC_ARG(uint32_t *, pEFlags, 1);
4708 IEM_MC_REF_GREG_U16(pu16Dst, iReg);
4709 IEM_MC_REF_EFLAGS(pEFlags);
4710 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
4711 IEM_MC_ADVANCE_RIP();
4712 IEM_MC_END();
4713 return VINF_SUCCESS;
4714
4715 case IEMMODE_32BIT:
4716 IEM_MC_BEGIN(2, 0);
4717 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4718 IEM_MC_ARG(uint32_t *, pEFlags, 1);
4719 IEM_MC_REF_GREG_U32(pu32Dst, iReg);
4720 IEM_MC_REF_EFLAGS(pEFlags);
4721 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
4722 IEM_MC_ADVANCE_RIP();
4723 IEM_MC_END();
4724 return VINF_SUCCESS;
4725
4726 case IEMMODE_64BIT:
4727 IEM_MC_BEGIN(2, 0);
4728 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4729 IEM_MC_ARG(uint32_t *, pEFlags, 1);
4730 IEM_MC_REF_GREG_U64(pu64Dst, iReg);
4731 IEM_MC_REF_EFLAGS(pEFlags);
4732 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
4733 IEM_MC_ADVANCE_RIP();
4734 IEM_MC_END();
4735 return VINF_SUCCESS;
4736 }
4737 return VINF_SUCCESS;
4738}
4739
4740
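/*
 * Note! Opcodes 0x40..0x4f double as REX prefixes in 64-bit mode. The low
 *       nibble of the opcode byte supplies the W, R, X and B bits directly
 *       (0100wrxb), i.e. for an opcode byte b in this range:
 *           W = (b >> 3) & 1;   R = (b >> 2) & 1;
 *           X = (b >> 1) & 1;   B =  b       & 1;
 *       So 0x41 is REX.B, 0x48 is REX.W and 0x4f sets all four bits, as the
 *       handlers below spell out case by case.
 */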
4741/** Opcode 0x40. */
4742FNIEMOP_DEF(iemOp_inc_eAX)
4743{
4744 /*
4745 * This is a REX prefix in 64-bit mode.
4746 */
4747 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4748 {
4749 pIemCpu->fPrefixes |= IEM_OP_PRF_REX;
4750
4751 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4752 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4753 }
4754
4755 IEMOP_MNEMONIC("inc eAX");
4756 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xAX);
4757}
4758
4759
4760/** Opcode 0x41. */
4761FNIEMOP_DEF(iemOp_inc_eCX)
4762{
4763 /*
4764 * This is a REX prefix in 64-bit mode.
4765 */
4766 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4767 {
4768 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B;
4769 pIemCpu->uRexB = 1 << 3;
4770
4771 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4772 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4773 }
4774
4775 IEMOP_MNEMONIC("inc eCX");
4776 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xCX);
4777}
4778
4779
4780/** Opcode 0x42. */
4781FNIEMOP_DEF(iemOp_inc_eDX)
4782{
4783 /*
4784 * This is a REX prefix in 64-bit mode.
4785 */
4786 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4787 {
4788 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X;
4789 pIemCpu->uRexIndex = 1 << 3;
4790
4791 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4792 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4793 }
4794
4795 IEMOP_MNEMONIC("inc eDX");
4796 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDX);
4797}
4798
4799
4800
4801/** Opcode 0x43. */
4802FNIEMOP_DEF(iemOp_inc_eBX)
4803{
4804 /*
4805 * This is a REX prefix in 64-bit mode.
4806 */
4807 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4808 {
4809 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
4810 pIemCpu->uRexB = 1 << 3;
4811 pIemCpu->uRexIndex = 1 << 3;
4812
4813 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4814 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4815 }
4816
4817 IEMOP_MNEMONIC("inc eBX");
4818 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBX);
4819}
4820
4821
4822/** Opcode 0x44. */
4823FNIEMOP_DEF(iemOp_inc_eSP)
4824{
4825 /*
4826 * This is a REX prefix in 64-bit mode.
4827 */
4828 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4829 {
4830 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R;
4831 pIemCpu->uRexReg = 1 << 3;
4832
4833 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4834 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4835 }
4836
4837 IEMOP_MNEMONIC("inc eSP");
4838 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSP);
4839}
4840
4841
4842/** Opcode 0x45. */
4843FNIEMOP_DEF(iemOp_inc_eBP)
4844{
4845 /*
4846 * This is a REX prefix in 64-bit mode.
4847 */
4848 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4849 {
4850 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B;
4851 pIemCpu->uRexReg = 1 << 3;
4852 pIemCpu->uRexB = 1 << 3;
4853
4854 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4855 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4856 }
4857
4858 IEMOP_MNEMONIC("inc eBP");
4859 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBP);
4860}
4861
4862
4863/** Opcode 0x46. */
4864FNIEMOP_DEF(iemOp_inc_eSI)
4865{
4866 /*
4867 * This is a REX prefix in 64-bit mode.
4868 */
4869 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4870 {
4871 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X;
4872 pIemCpu->uRexReg = 1 << 3;
4873 pIemCpu->uRexIndex = 1 << 3;
4874
4875 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4876 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4877 }
4878
4879 IEMOP_MNEMONIC("inc eSI");
4880 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSI);
4881}
4882
4883
4884/** Opcode 0x47. */
4885FNIEMOP_DEF(iemOp_inc_eDI)
4886{
4887 /*
4888 * This is a REX prefix in 64-bit mode.
4889 */
4890 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4891 {
4892 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
4893 pIemCpu->uRexReg = 1 << 3;
4894 pIemCpu->uRexB = 1 << 3;
4895 pIemCpu->uRexIndex = 1 << 3;
4896
4897 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4898 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4899 }
4900
4901 IEMOP_MNEMONIC("inc eDI");
4902 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDI);
4903}
4904
4905
4906/** Opcode 0x48. */
4907FNIEMOP_DEF(iemOp_dec_eAX)
4908{
4909 /*
4910 * This is a REX prefix in 64-bit mode.
4911 */
4912 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4913 {
4914 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_SIZE_REX_W;
4915 iemRecalEffOpSize(pIemCpu);
4916
4917 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4918 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4919 }
4920
4921 IEMOP_MNEMONIC("dec eAX");
4922 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xAX);
4923}
4924
4925
4926/** Opcode 0x49. */
4927FNIEMOP_DEF(iemOp_dec_eCX)
4928{
4929 /*
4930 * This is a REX prefix in 64-bit mode.
4931 */
4932 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4933 {
4934 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
4935 pIemCpu->uRexB = 1 << 3;
4936 iemRecalEffOpSize(pIemCpu);
4937
4938 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4939 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4940 }
4941
4942 IEMOP_MNEMONIC("dec eCX");
4943 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xCX);
4944}
4945
4946
4947/** Opcode 0x4a. */
4948FNIEMOP_DEF(iemOp_dec_eDX)
4949{
4950 /*
4951 * This is a REX prefix in 64-bit mode.
4952 */
4953 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4954 {
4955 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
4956 pIemCpu->uRexIndex = 1 << 3;
4957 iemRecalEffOpSize(pIemCpu);
4958
4959 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4960 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4961 }
4962
4963 IEMOP_MNEMONIC("dec eDX");
4964 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDX);
4965}
4966
4967
4968/** Opcode 0x4b. */
4969FNIEMOP_DEF(iemOp_dec_eBX)
4970{
4971 /*
4972 * This is a REX prefix in 64-bit mode.
4973 */
4974 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4975 {
4976 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
4977 pIemCpu->uRexB = 1 << 3;
4978 pIemCpu->uRexIndex = 1 << 3;
4979 iemRecalEffOpSize(pIemCpu);
4980
4981 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4982 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4983 }
4984
4985 IEMOP_MNEMONIC("dec eBX");
4986 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBX);
4987}
4988
4989
4990/** Opcode 0x4c. */
4991FNIEMOP_DEF(iemOp_dec_eSP)
4992{
4993 /*
4994 * This is a REX prefix in 64-bit mode.
4995 */
4996 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4997 {
4998 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_SIZE_REX_W;
4999 pIemCpu->uRexReg = 1 << 3;
5000 iemRecalEffOpSize(pIemCpu);
5001
5002 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5003 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5004 }
5005
5006 IEMOP_MNEMONIC("dec eSP");
5007 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSP);
5008}
5009
5010
5011/** Opcode 0x4d. */
5012FNIEMOP_DEF(iemOp_dec_eBP)
5013{
5014 /*
5015 * This is a REX prefix in 64-bit mode.
5016 */
5017 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5018 {
5019 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
5020 pIemCpu->uRexReg = 1 << 3;
5021 pIemCpu->uRexB = 1 << 3;
5022 iemRecalEffOpSize(pIemCpu);
5023
5024 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5025 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5026 }
5027
5028 IEMOP_MNEMONIC("dec eBP");
5029 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBP);
5030}
5031
5032
5033/** Opcode 0x4e. */
5034FNIEMOP_DEF(iemOp_dec_eSI)
5035{
5036 /*
5037 * This is a REX prefix in 64-bit mode.
5038 */
5039 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5040 {
5041 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
5042 pIemCpu->uRexReg = 1 << 3;
5043 pIemCpu->uRexIndex = 1 << 3;
5044 iemRecalEffOpSize(pIemCpu);
5045
5046 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5047 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5048 }
5049
5050 IEMOP_MNEMONIC("dec eSI");
5051 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSI);
5052}
5053
5054
5055/** Opcode 0x4f. */
5056FNIEMOP_DEF(iemOp_dec_eDI)
5057{
5058 /*
5059 * This is a REX prefix in 64-bit mode.
5060 */
5061 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5062 {
5063 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
5064 pIemCpu->uRexReg = 1 << 3;
5065 pIemCpu->uRexB = 1 << 3;
5066 pIemCpu->uRexIndex = 1 << 3;
5067 iemRecalEffOpSize(pIemCpu);
5068
5069 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5070 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5071 }
5072
5073 IEMOP_MNEMONIC("dec eDI");
5074 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDI);
5075}
5076
5077
5078/**
5079 * Common 'push register' helper.
5080 */
5081FNIEMOP_DEF_1(iemOpCommonPushGReg, uint8_t, iReg)
5082{
5083 IEMOP_HLP_NO_LOCK_PREFIX();
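    /* Note! In 64-bit mode PUSH defaults to a 64-bit operand and a 32-bit
             operand size cannot be encoded; only the 66h prefix (16-bit) can
             override the default, which is what the fixup below implements. */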
5084 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5085 {
5086 iReg |= pIemCpu->uRexB;
5087 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
5088 pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
5089 }
5090
5091 switch (pIemCpu->enmEffOpSize)
5092 {
5093 case IEMMODE_16BIT:
5094 IEM_MC_BEGIN(0, 1);
5095 IEM_MC_LOCAL(uint16_t, u16Value);
5096 IEM_MC_FETCH_GREG_U16(u16Value, iReg);
5097 IEM_MC_PUSH_U16(u16Value);
5098 IEM_MC_ADVANCE_RIP();
5099 IEM_MC_END();
5100 break;
5101
5102 case IEMMODE_32BIT:
5103 IEM_MC_BEGIN(0, 1);
5104 IEM_MC_LOCAL(uint32_t, u32Value);
5105 IEM_MC_FETCH_GREG_U32(u32Value, iReg);
5106 IEM_MC_PUSH_U32(u32Value);
5107 IEM_MC_ADVANCE_RIP();
5108 IEM_MC_END();
5109 break;
5110
5111 case IEMMODE_64BIT:
5112 IEM_MC_BEGIN(0, 1);
5113 IEM_MC_LOCAL(uint64_t, u64Value);
5114 IEM_MC_FETCH_GREG_U64(u64Value, iReg);
5115 IEM_MC_PUSH_U64(u64Value);
5116 IEM_MC_ADVANCE_RIP();
5117 IEM_MC_END();
5118 break;
5119 }
5120
5121 return VINF_SUCCESS;
5122}
5123
5124
5125/** Opcode 0x50. */
5126FNIEMOP_DEF(iemOp_push_eAX)
5127{
5128 IEMOP_MNEMONIC("push rAX");
5129 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xAX);
5130}
5131
5132
5133/** Opcode 0x51. */
5134FNIEMOP_DEF(iemOp_push_eCX)
5135{
5136 IEMOP_MNEMONIC("push rCX");
5137 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xCX);
5138}
5139
5140
5141/** Opcode 0x52. */
5142FNIEMOP_DEF(iemOp_push_eDX)
5143{
5144 IEMOP_MNEMONIC("push rDX");
5145 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDX);
5146}
5147
5148
5149/** Opcode 0x53. */
5150FNIEMOP_DEF(iemOp_push_eBX)
5151{
5152 IEMOP_MNEMONIC("push rBX");
5153 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBX);
5154}
5155
5156
5157/** Opcode 0x54. */
5158FNIEMOP_DEF(iemOp_push_eSP)
5159{
5160 IEMOP_MNEMONIC("push rSP");
5161 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSP);
5162}
5163
5164
5165/** Opcode 0x55. */
5166FNIEMOP_DEF(iemOp_push_eBP)
5167{
5168 IEMOP_MNEMONIC("push rBP");
5169 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBP);
5170}
5171
5172
5173/** Opcode 0x56. */
5174FNIEMOP_DEF(iemOp_push_eSI)
5175{
5176 IEMOP_MNEMONIC("push rSI");
5177 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSI);
5178}
5179
5180
5181/** Opcode 0x57. */
5182FNIEMOP_DEF(iemOp_push_eDI)
5183{
5184 IEMOP_MNEMONIC("push rDI");
5185 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDI);
5186}
5187
5188
5189/**
5190 * Common 'pop register' helper.
5191 */
5192FNIEMOP_DEF_1(iemOpCommonPopGReg, uint8_t, iReg)
5193{
5194 IEMOP_HLP_NO_LOCK_PREFIX();
5195 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5196 {
5197 iReg |= pIemCpu->uRexB;
5198 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
5199 pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
5200 }
5201
5202/** @todo How does this code handle iReg==X86_GREG_xSP? How does a real CPU
5203 * handle it, for that matter? (The Intel pseudo code hints that the popped
5204 * value is incremented by the stack item size.) Test it, both encodings
5205 * and all three register sizes. */
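/* Note! The SDM pseudo code increments RSP before the value read from the
   old top of stack is stored, so for POP rSP the loaded value should end up
   winning over the increment; this is unverified here, hence the @todo. */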
5206 switch (pIemCpu->enmEffOpSize)
5207 {
5208 case IEMMODE_16BIT:
5209 IEM_MC_BEGIN(0, 1);
5210 IEM_MC_LOCAL(uint16_t *, pu16Dst);
5211 IEM_MC_REF_GREG_U16(pu16Dst, iReg);
5212 IEM_MC_POP_U16(pu16Dst);
5213 IEM_MC_ADVANCE_RIP();
5214 IEM_MC_END();
5215 break;
5216
5217 case IEMMODE_32BIT:
5218 IEM_MC_BEGIN(0, 1);
5219 IEM_MC_LOCAL(uint32_t *, pu32Dst);
5220 IEM_MC_REF_GREG_U32(pu32Dst, iReg);
5221 IEM_MC_POP_U32(pu32Dst);
5222 IEM_MC_ADVANCE_RIP();
5223 IEM_MC_END();
5224 break;
5225
5226 case IEMMODE_64BIT:
5227 IEM_MC_BEGIN(0, 1);
5228 IEM_MC_LOCAL(uint64_t *, pu64Dst);
5229 IEM_MC_REF_GREG_U64(pu64Dst, iReg);
5230 IEM_MC_POP_U64(pu64Dst);
5231 IEM_MC_ADVANCE_RIP();
5232 IEM_MC_END();
5233 break;
5234 }
5235
5236 return VINF_SUCCESS;
5237}
5238
5239
5240/** Opcode 0x58. */
5241FNIEMOP_DEF(iemOp_pop_eAX)
5242{
5243 IEMOP_MNEMONIC("pop rAX");
5244 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xAX);
5245}
5246
5247
5248/** Opcode 0x59. */
5249FNIEMOP_DEF(iemOp_pop_eCX)
5250{
5251 IEMOP_MNEMONIC("pop rCX");
5252 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xCX);
5253}
5254
5255
5256/** Opcode 0x5a. */
5257FNIEMOP_DEF(iemOp_pop_eDX)
5258{
5259 IEMOP_MNEMONIC("pop rDX");
5260 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDX);
5261}
5262
5263
5264/** Opcode 0x5b. */
5265FNIEMOP_DEF(iemOp_pop_eBX)
5266{
5267 IEMOP_MNEMONIC("pop rBX");
5268 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBX);
5269}
5270
5271
5272/** Opcode 0x5c. */
5273FNIEMOP_DEF(iemOp_pop_eSP)
5274{
5275 IEMOP_MNEMONIC("pop rSP");
5276 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSP);
5277}
5278
5279
5280/** Opcode 0x5d. */
5281FNIEMOP_DEF(iemOp_pop_eBP)
5282{
5283 IEMOP_MNEMONIC("pop rBP");
5284 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBP);
5285}
5286
5287
5288/** Opcode 0x5e. */
5289FNIEMOP_DEF(iemOp_pop_eSI)
5290{
5291 IEMOP_MNEMONIC("pop rSI");
5292 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSI);
5293}
5294
5295
5296/** Opcode 0x5f. */
5297FNIEMOP_DEF(iemOp_pop_eDI)
5298{
5299 IEMOP_MNEMONIC("pop rDI");
5300 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDI);
5301}
5302
5303
5304/** Opcode 0x60. */
5305FNIEMOP_DEF(iemOp_pusha)
5306{
5307 IEMOP_MNEMONIC("pusha");
5308 IEMOP_HLP_NO_64BIT();
5309 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
5310 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_16);
5311 Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
5312 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_32);
5313}
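
/* Note! The (E)SP value PUSHA stores is the one the register had before the
   instruction started, even though SP itself moves in the middle of the
   push sequence. */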
5314
5315
5316/** Opcode 0x61. */
5317FNIEMOP_DEF(iemOp_popa)
5318{
5319 IEMOP_MNEMONIC("popa");
5320 IEMOP_HLP_NO_64BIT();
5321 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
5322 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_16);
5323 Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
5324 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_32);
5325}
5326
5327
5328/** Opcode 0x62. */
5329FNIEMOP_STUB(iemOp_bound_Gv_Ma);
5330/** Opcode 0x63. */
5331FNIEMOP_STUB(iemOp_arpl_Ew_Gw);
5332
5333
5334/** Opcode 0x64. */
5335FNIEMOP_DEF(iemOp_seg_FS)
5336{
5337 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_FS;
5338 pIemCpu->iEffSeg = X86_SREG_FS;
5339
5340 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5341 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5342}
5343
5344
5345/** Opcode 0x65. */
5346FNIEMOP_DEF(iemOp_seg_GS)
5347{
5348 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_GS;
5349 pIemCpu->iEffSeg = X86_SREG_GS;
5350
5351 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5352 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5353}
5354
5355
5356/** Opcode 0x66. */
5357FNIEMOP_DEF(iemOp_op_size)
5358{
5359 pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_OP;
5360 iemRecalEffOpSize(pIemCpu);
5361
5362 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5363 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5364}
5365
5366
5367/** Opcode 0x67. */
5368FNIEMOP_DEF(iemOp_addr_size)
5369{
5370 pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_ADDR;
5371 switch (pIemCpu->enmDefAddrMode)
5372 {
5373 case IEMMODE_16BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
5374 case IEMMODE_32BIT: pIemCpu->enmEffAddrMode = IEMMODE_16BIT; break;
5375 case IEMMODE_64BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
5376 default: AssertFailed();
5377 }
5378
5379 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5380 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5381}
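
/* Note! 16-bit addressing does not exist in 64-bit mode, which is why 67h
   selects 32-bit addressing there instead of toggling between two sizes as
   it does in 16-bit and 32-bit mode. */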
5382
5383
5384/** Opcode 0x68. */
5385FNIEMOP_DEF(iemOp_push_Iz)
5386{
5387 IEMOP_MNEMONIC("push Iz");
5388 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
5389 switch (pIemCpu->enmEffOpSize)
5390 {
5391 case IEMMODE_16BIT:
5392 {
5393 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
5394 IEMOP_HLP_NO_LOCK_PREFIX();
5395 IEM_MC_BEGIN(0,0);
5396 IEM_MC_PUSH_U16(u16Imm);
5397 IEM_MC_ADVANCE_RIP();
5398 IEM_MC_END();
5399 return VINF_SUCCESS;
5400 }
5401
5402 case IEMMODE_32BIT:
5403 {
5404 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
5405 IEMOP_HLP_NO_LOCK_PREFIX();
5406 IEM_MC_BEGIN(0,0);
5407 IEM_MC_PUSH_U32(u32Imm);
5408 IEM_MC_ADVANCE_RIP();
5409 IEM_MC_END();
5410 return VINF_SUCCESS;
5411 }
5412
5413 case IEMMODE_64BIT:
5414 {
5415 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
5416 IEMOP_HLP_NO_LOCK_PREFIX();
5417 IEM_MC_BEGIN(0,0);
5418 IEM_MC_PUSH_U64(u64Imm);
5419 IEM_MC_ADVANCE_RIP();
5420 IEM_MC_END();
5421 return VINF_SUCCESS;
5422 }
5423
5424 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5425 }
5426}
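
/* Note! There is no 64-bit immediate PUSH form; in 64-bit mode Iz is a
   sign-extended 32-bit immediate, matching the S32_SX_U64 fetch above. */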
5427
5428
5429/** Opcode 0x69. */
5430FNIEMOP_DEF(iemOp_imul_Gv_Ev_Iz)
5431{
5432 IEMOP_MNEMONIC("imul Gv,Ev,Iz"); /* Gv = Ev * Iz; */
5433 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
5434 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
5435
5436 switch (pIemCpu->enmEffOpSize)
5437 {
5438 case IEMMODE_16BIT:
5439 {
5440 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
5441 IEMOP_HLP_NO_LOCK_PREFIX();
5442 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5443 {
5444 /* register operand */
5445 IEM_MC_BEGIN(3, 1);
5446 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5447 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm,1);
5448 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5449 IEM_MC_LOCAL(uint16_t, u16Tmp);
5450
5451 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5452 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
5453 IEM_MC_REF_EFLAGS(pEFlags);
5454 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
5455 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
5456
5457 IEM_MC_ADVANCE_RIP();
5458 IEM_MC_END();
5459 }
5460 else
5461 {
5462 /* memory operand */
5463 IEM_MC_BEGIN(3, 2);
5464 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5465 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm,1);
5466 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5467 IEM_MC_LOCAL(uint16_t, u16Tmp);
5468 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5469
5470 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5471 IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5472 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
5473 IEM_MC_REF_EFLAGS(pEFlags);
5474 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
5475 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
5476
5477 IEM_MC_ADVANCE_RIP();
5478 IEM_MC_END();
5479 }
5480 return VINF_SUCCESS;
5481 }
5482
5483 case IEMMODE_32BIT:
5484 {
5485 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
5486 IEMOP_HLP_NO_LOCK_PREFIX();
5487 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5488 {
5489 /* register operand */
5490 IEM_MC_BEGIN(3, 1);
5491 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5492 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm,1);
5493 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5494 IEM_MC_LOCAL(uint32_t, u32Tmp);
5495
5496 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5497 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
5498 IEM_MC_REF_EFLAGS(pEFlags);
5499 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
5500 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
5501
5502 IEM_MC_ADVANCE_RIP();
5503 IEM_MC_END();
5504 }
5505 else
5506 {
5507 /* memory operand */
5508 IEM_MC_BEGIN(3, 2);
5509 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5510 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm,1);
5511 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5512 IEM_MC_LOCAL(uint32_t, u32Tmp);
5513 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5514
5515 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5516 IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5517 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
5518 IEM_MC_REF_EFLAGS(pEFlags);
5519 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
5520 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
5521
5522 IEM_MC_ADVANCE_RIP();
5523 IEM_MC_END();
5524 }
5525 return VINF_SUCCESS;
5526 }
5527
5528 case IEMMODE_64BIT:
5529 {
5530 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
5531 IEMOP_HLP_NO_LOCK_PREFIX();
5532 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5533 {
5534 /* register operand */
5535 IEM_MC_BEGIN(3, 1);
5536 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5537 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm,1);
5538 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5539 IEM_MC_LOCAL(uint64_t, u64Tmp);
5540
5541 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5542 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
5543 IEM_MC_REF_EFLAGS(pEFlags);
5544 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
5545 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
5546
5547 IEM_MC_ADVANCE_RIP();
5548 IEM_MC_END();
5549 }
5550 else
5551 {
5552 /* memory operand */
5553 IEM_MC_BEGIN(3, 2);
5554 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5555 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm,1);
5556 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5557 IEM_MC_LOCAL(uint64_t, u64Tmp);
5558 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5559
5560 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5561 IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5562 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
5563 IEM_MC_REF_EFLAGS(pEFlags);
5564 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
5565 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
5566
5567 IEM_MC_ADVANCE_RIP();
5568 IEM_MC_END();
5569 }
5570 return VINF_SUCCESS;
5571 }
5572 }
5573 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
5574}
5575
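/*
 * Aside: the iemAImpl_imul_two_u* workers invoked above implement the
 * truncating two-operand multiply. A minimal sketch of the 16-bit variant
 * under the usual IMUL flag rule (CF/OF set when significant bits are
 * lost); hypothetical iemSketch_* helper, illustrative only:
 */
#if 0
static void iemSketch_ImulTwoU16(uint16_t *pu16Dst, uint16_t u16Src, uint32_t *pfEFlags)
{
    int32_t const iFull = (int32_t)(int16_t)*pu16Dst * (int32_t)(int16_t)u16Src;
    *pu16Dst = (uint16_t)iFull;
    if ((int32_t)(int16_t)iFull != iFull) /* result doesn't fit in 16 bits? */
        *pfEFlags |= X86_EFL_CF | X86_EFL_OF;
    else
        *pfEFlags &= ~(uint32_t)(X86_EFL_CF | X86_EFL_OF);
}
#endif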
5576
5577/** Opcode 0x6a. */
5578FNIEMOP_DEF(iemOp_push_Ib)
5579{
5580 IEMOP_MNEMONIC("push Ib");
5581 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
5582 IEMOP_HLP_NO_LOCK_PREFIX();
5583 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
5584
5585 IEM_MC_BEGIN(0,0);
5586 switch (pIemCpu->enmEffOpSize)
5587 {
5588 case IEMMODE_16BIT:
5589 IEM_MC_PUSH_U16(i8Imm);
5590 break;
5591 case IEMMODE_32BIT:
5592 IEM_MC_PUSH_U32(i8Imm);
5593 break;
5594 case IEMMODE_64BIT:
5595 IEM_MC_PUSH_U64(i8Imm);
5596 break;
5597 }
5598 IEM_MC_ADVANCE_RIP();
5599 IEM_MC_END();
5600 return VINF_SUCCESS;
5601}
5602
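/*
 * Note: the byte immediate above is sign-extended to the effective operand
 * size by the IEM_MC_PUSH_U* statements, so "push -1" stores all-ones at
 * any operand size. Minimal sketch for the 32-bit case, illustrative only:
 */
#if 0
    int8_t   const i8Imm  = -1;
    uint32_t const u32Val = (uint32_t)(int32_t)i8Imm; /* 0xffffffff gets pushed */
#endif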
5603
5604/** Opcode 0x6b. */
5605FNIEMOP_DEF(iemOp_imul_Gv_Ev_Ib)
5606{
5607 IEMOP_MNEMONIC("imul Gv,Ev,Ib"); /* Gv = Ev * Ib; */
5608 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
5609 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
5610 IEMOP_HLP_NO_LOCK_PREFIX();
5611 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
5612
5613 switch (pIemCpu->enmEffOpSize)
5614 {
5615 case IEMMODE_16BIT:
5616 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5617 {
5618 /* register operand */
5619 IEM_MC_BEGIN(3, 1);
5620 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5621 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ (int8_t)u8Imm, 1);
5622 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5623 IEM_MC_LOCAL(uint16_t, u16Tmp);
5624
5625 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5626 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
5627 IEM_MC_REF_EFLAGS(pEFlags);
5628 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
5629 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
5630
5631 IEM_MC_ADVANCE_RIP();
5632 IEM_MC_END();
5633 }
5634 else
5635 {
5636 /* memory operand */
5637 IEM_MC_BEGIN(3, 2);
5638 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5639 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ (int8_t)u8Imm, 1);
5640 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5641 IEM_MC_LOCAL(uint16_t, u16Tmp);
5642 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5643
5644 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5645 IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5646 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
5647 IEM_MC_REF_EFLAGS(pEFlags);
5648 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
5649 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
5650
5651 IEM_MC_ADVANCE_RIP();
5652 IEM_MC_END();
5653 }
5654 return VINF_SUCCESS;
5655
5656 case IEMMODE_32BIT:
5657 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5658 {
5659 /* register operand */
5660 IEM_MC_BEGIN(3, 1);
5661 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5662 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ (int8_t)u8Imm, 1);
5663 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5664 IEM_MC_LOCAL(uint32_t, u32Tmp);
5665
5666 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5667 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
5668 IEM_MC_REF_EFLAGS(pEFlags);
5669 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
5670 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
5671
5672 IEM_MC_ADVANCE_RIP();
5673 IEM_MC_END();
5674 }
5675 else
5676 {
5677 /* memory operand */
5678 IEM_MC_BEGIN(3, 2);
5679 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5680 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ (int8_t)u8Imm, 1);
5681 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5682 IEM_MC_LOCAL(uint32_t, u32Tmp);
5683 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5684
5685 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5686 IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5687 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
5688 IEM_MC_REF_EFLAGS(pEFlags);
5689 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
5690 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
5691
5692 IEM_MC_ADVANCE_RIP();
5693 IEM_MC_END();
5694 }
5695 return VINF_SUCCESS;
5696
5697 case IEMMODE_64BIT:
5698 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5699 {
5700 /* register operand */
5701 IEM_MC_BEGIN(3, 1);
5702 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5703 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ (int8_t)u8Imm, 1);
5704 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5705 IEM_MC_LOCAL(uint64_t, u64Tmp);
5706
5707 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5708 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
5709 IEM_MC_REF_EFLAGS(pEFlags);
5710 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
5711 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
5712
5713 IEM_MC_ADVANCE_RIP();
5714 IEM_MC_END();
5715 }
5716 else
5717 {
5718 /* memory operand */
5719 IEM_MC_BEGIN(3, 2);
5720 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5721 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ (int8_t)u8Imm, 1);
5722 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5723 IEM_MC_LOCAL(uint64_t, u64Tmp);
5724 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5725
5726 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5727 IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5728 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
5729 IEM_MC_REF_EFLAGS(pEFlags);
5730 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
5731 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
5732
5733 IEM_MC_ADVANCE_RIP();
5734 IEM_MC_END();
5735 }
5736 return VINF_SUCCESS;
5737 }
5738 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
5739}
5740
5741
5742/** Opcode 0x6c. */
5743FNIEMOP_DEF(iemOp_insb_Yb_DX)
5744{
5745 IEMOP_HLP_NO_LOCK_PREFIX();
5746 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
5747 {
5748 IEMOP_MNEMONIC("rep ins Yb,DX");
5749 switch (pIemCpu->enmEffAddrMode)
5750 {
5751 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr16);
5752 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr32);
5753 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr64);
5754 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5755 }
5756 }
5757 else
5758 {
5759 IEMOP_MNEMONIC("ins Yb,DX");
5760 switch (pIemCpu->enmEffAddrMode)
5761 {
5762 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr16);
5763 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr32);
5764 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr64);
5765 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5766 }
5767 }
5768}
5769
5770
5771/** Opcode 0x6d. */
5772FNIEMOP_DEF(iemOp_inswd_Yv_DX)
5773{
5774 IEMOP_HLP_NO_LOCK_PREFIX();
5775 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
5776 {
5777 IEMOP_MNEMONIC("rep ins Yv,DX");
5778 switch (pIemCpu->enmEffOpSize)
5779 {
5780 case IEMMODE_16BIT:
5781 switch (pIemCpu->enmEffAddrMode)
5782 {
5783 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr16);
5784 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr32);
5785 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr64);
5786 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5787 }
5788 break;
5789 case IEMMODE_64BIT:
5790 case IEMMODE_32BIT:
5791 switch (pIemCpu->enmEffAddrMode)
5792 {
5793 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr16);
5794 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr32);
5795 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr64);
5796 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5797 }
5798 break;
5799 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5800 }
5801 }
5802 else
5803 {
5804 IEMOP_MNEMONIC("ins Yv,DX");
5805 switch (pIemCpu->enmEffOpSize)
5806 {
5807 case IEMMODE_16BIT:
5808 switch (pIemCpu->enmEffAddrMode)
5809 {
5810 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr16);
5811 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr32);
5812 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr64);
5813 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5814 }
5815 break;
5816 case IEMMODE_64BIT:
5817 case IEMMODE_32BIT:
5818 switch (pIemCpu->enmEffAddrMode)
5819 {
5820 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr16);
5821 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr32);
5822 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr64);
5823 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5824 }
5825 break;
5826 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5827 }
5828 }
5829}
5830
5831
5832/** Opcode 0x6e. */
5833FNIEMOP_DEF(iemOp_outsb_Yb_DX)
5834{
5835 IEMOP_HLP_NO_LOCK_PREFIX();
5836 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
5837 {
5838 IEMOP_MNEMONIC("rep outs DX,Yb");
5839 switch (pIemCpu->enmEffAddrMode)
5840 {
5841 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr16, pIemCpu->iEffSeg);
5842 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr32, pIemCpu->iEffSeg);
5843 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr64, pIemCpu->iEffSeg);
5844 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5845 }
5846 }
5847 else
5848 {
5849 IEMOP_MNEMONIC("outs DX,Yb");
5850 switch (pIemCpu->enmEffAddrMode)
5851 {
5852 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr16, pIemCpu->iEffSeg);
5853 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr32, pIemCpu->iEffSeg);
5854 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr64, pIemCpu->iEffSeg);
5855 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5856 }
5857 }
5858}
5859
5860
5861/** Opcode 0x6f. */
5862FNIEMOP_DEF(iemOp_outswd_Yv_DX)
5863{
5864 IEMOP_HLP_NO_LOCK_PREFIX();
5865 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
5866 {
5867 IEMOP_MNEMONIC("rep outs DX,Yv");
5868 switch (pIemCpu->enmEffOpSize)
5869 {
5870 case IEMMODE_16BIT:
5871 switch (pIemCpu->enmEffAddrMode)
5872 {
5873 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr16, pIemCpu->iEffSeg);
5874 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr32, pIemCpu->iEffSeg);
5875 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr64, pIemCpu->iEffSeg);
5876 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5877 }
5878 break;
5879 case IEMMODE_64BIT:
5880 case IEMMODE_32BIT:
5881 switch (pIemCpu->enmEffAddrMode)
5882 {
5883 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr16, pIemCpu->iEffSeg);
5884 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr32, pIemCpu->iEffSeg);
5885 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr64, pIemCpu->iEffSeg);
5886 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5887 }
5888 break;
5889 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5890 }
5891 }
5892 else
5893 {
5894 IEMOP_MNEMONIC("outs DX,Yv");
5895 switch (pIemCpu->enmEffOpSize)
5896 {
5897 case IEMMODE_16BIT:
5898 switch (pIemCpu->enmEffAddrMode)
5899 {
5900 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr16, pIemCpu->iEffSeg);
5901 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr32, pIemCpu->iEffSeg);
5902 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr64, pIemCpu->iEffSeg);
5903 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5904 }
5905 break;
5906 case IEMMODE_64BIT:
5907 case IEMMODE_32BIT:
5908 switch (pIemCpu->enmEffAddrMode)
5909 {
5910 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr16, pIemCpu->iEffSeg);
5911 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr32, pIemCpu->iEffSeg);
5912 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr64, pIemCpu->iEffSeg);
5913 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5914 }
5915 break;
5916 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5917 }
5918 }
5919}
5920
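/*
 * Note on the shared IEMMODE_64BIT/IEMMODE_32BIT case labels in the string
 * I/O dispatchers above: there is no 64-bit port I/O, so a 64-bit effective
 * operand size is served by the op32 workers. A sketch of that folding;
 * hypothetical iemSketch_* helper, illustrative only:
 */
#if 0
static uint8_t iemSketch_StringIoOpSize(IEMMODE enmEffOpSize)
{
    return enmEffOpSize == IEMMODE_16BIT ? 2 : 4; /* 64-bit folds to 32-bit */
}
#endif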
5921
5922/** Opcode 0x70. */
5923FNIEMOP_DEF(iemOp_jo_Jb)
5924{
5925 IEMOP_MNEMONIC("jo Jb");
5926 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
5927 IEMOP_HLP_NO_LOCK_PREFIX();
5928 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
5929
5930 IEM_MC_BEGIN(0, 0);
5931 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
5932 IEM_MC_REL_JMP_S8(i8Imm);
5933 } IEM_MC_ELSE() {
5934 IEM_MC_ADVANCE_RIP();
5935 } IEM_MC_ENDIF();
5936 IEM_MC_END();
5937 return VINF_SUCCESS;
5938}
5939
5940
5941/** Opcode 0x71. */
5942FNIEMOP_DEF(iemOp_jno_Jb)
5943{
5944 IEMOP_MNEMONIC("jno Jb");
5945 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
5946 IEMOP_HLP_NO_LOCK_PREFIX();
5947 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
5948
5949 IEM_MC_BEGIN(0, 0);
5950 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
5951 IEM_MC_ADVANCE_RIP();
5952 } IEM_MC_ELSE() {
5953 IEM_MC_REL_JMP_S8(i8Imm);
5954 } IEM_MC_ENDIF();
5955 IEM_MC_END();
5956 return VINF_SUCCESS;
5957}
5958
5959/** Opcode 0x72. */
5960FNIEMOP_DEF(iemOp_jc_Jb)
5961{
5962 IEMOP_MNEMONIC("jc/jnae Jb");
5963 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
5964 IEMOP_HLP_NO_LOCK_PREFIX();
5965 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
5966
5967 IEM_MC_BEGIN(0, 0);
5968 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
5969 IEM_MC_REL_JMP_S8(i8Imm);
5970 } IEM_MC_ELSE() {
5971 IEM_MC_ADVANCE_RIP();
5972 } IEM_MC_ENDIF();
5973 IEM_MC_END();
5974 return VINF_SUCCESS;
5975}
5976
5977
5978/** Opcode 0x73. */
5979FNIEMOP_DEF(iemOp_jnc_Jb)
5980{
5981 IEMOP_MNEMONIC("jnc/jnb Jb");
5982 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
5983 IEMOP_HLP_NO_LOCK_PREFIX();
5984 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
5985
5986 IEM_MC_BEGIN(0, 0);
5987 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
5988 IEM_MC_ADVANCE_RIP();
5989 } IEM_MC_ELSE() {
5990 IEM_MC_REL_JMP_S8(i8Imm);
5991 } IEM_MC_ENDIF();
5992 IEM_MC_END();
5993 return VINF_SUCCESS;
5994}
5995
5996
5997/** Opcode 0x74. */
5998FNIEMOP_DEF(iemOp_je_Jb)
5999{
6000 IEMOP_MNEMONIC("je/jz Jb");
6001 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6002 IEMOP_HLP_NO_LOCK_PREFIX();
6003 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6004
6005 IEM_MC_BEGIN(0, 0);
6006 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
6007 IEM_MC_REL_JMP_S8(i8Imm);
6008 } IEM_MC_ELSE() {
6009 IEM_MC_ADVANCE_RIP();
6010 } IEM_MC_ENDIF();
6011 IEM_MC_END();
6012 return VINF_SUCCESS;
6013}
6014
6015
6016/** Opcode 0x75. */
6017FNIEMOP_DEF(iemOp_jne_Jb)
6018{
6019 IEMOP_MNEMONIC("jne/jnz Jb");
6020 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6021 IEMOP_HLP_NO_LOCK_PREFIX();
6022 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6023
6024 IEM_MC_BEGIN(0, 0);
6025 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
6026 IEM_MC_ADVANCE_RIP();
6027 } IEM_MC_ELSE() {
6028 IEM_MC_REL_JMP_S8(i8Imm);
6029 } IEM_MC_ENDIF();
6030 IEM_MC_END();
6031 return VINF_SUCCESS;
6032}
6033
6034
6035/** Opcode 0x76. */
6036FNIEMOP_DEF(iemOp_jbe_Jb)
6037{
6038 IEMOP_MNEMONIC("jbe/jna Jb");
6039 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6040 IEMOP_HLP_NO_LOCK_PREFIX();
6041 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6042
6043 IEM_MC_BEGIN(0, 0);
6044 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
6045 IEM_MC_REL_JMP_S8(i8Imm);
6046 } IEM_MC_ELSE() {
6047 IEM_MC_ADVANCE_RIP();
6048 } IEM_MC_ENDIF();
6049 IEM_MC_END();
6050 return VINF_SUCCESS;
6051}
6052
6053
6054/** Opcode 0x77. */
6055FNIEMOP_DEF(iemOp_jnbe_Jb)
6056{
6057 IEMOP_MNEMONIC("jnbe/ja Jb");
6058 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6059 IEMOP_HLP_NO_LOCK_PREFIX();
6060 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6061
6062 IEM_MC_BEGIN(0, 0);
6063 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
6064 IEM_MC_ADVANCE_RIP();
6065 } IEM_MC_ELSE() {
6066 IEM_MC_REL_JMP_S8(i8Imm);
6067 } IEM_MC_ENDIF();
6068 IEM_MC_END();
6069 return VINF_SUCCESS;
6070}
6071
6072
6073/** Opcode 0x78. */
6074FNIEMOP_DEF(iemOp_js_Jb)
6075{
6076 IEMOP_MNEMONIC("js Jb");
6077 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6078 IEMOP_HLP_NO_LOCK_PREFIX();
6079 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6080
6081 IEM_MC_BEGIN(0, 0);
6082 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
6083 IEM_MC_REL_JMP_S8(i8Imm);
6084 } IEM_MC_ELSE() {
6085 IEM_MC_ADVANCE_RIP();
6086 } IEM_MC_ENDIF();
6087 IEM_MC_END();
6088 return VINF_SUCCESS;
6089}
6090
6091
6092/** Opcode 0x79. */
6093FNIEMOP_DEF(iemOp_jns_Jb)
6094{
6095 IEMOP_MNEMONIC("jns Jb");
6096 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6097 IEMOP_HLP_NO_LOCK_PREFIX();
6098 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6099
6100 IEM_MC_BEGIN(0, 0);
6101 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
6102 IEM_MC_ADVANCE_RIP();
6103 } IEM_MC_ELSE() {
6104 IEM_MC_REL_JMP_S8(i8Imm);
6105 } IEM_MC_ENDIF();
6106 IEM_MC_END();
6107 return VINF_SUCCESS;
6108}
6109
6110
6111/** Opcode 0x7a. */
6112FNIEMOP_DEF(iemOp_jp_Jb)
6113{
6114 IEMOP_MNEMONIC("jp Jb");
6115 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6116 IEMOP_HLP_NO_LOCK_PREFIX();
6117 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6118
6119 IEM_MC_BEGIN(0, 0);
6120 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
6121 IEM_MC_REL_JMP_S8(i8Imm);
6122 } IEM_MC_ELSE() {
6123 IEM_MC_ADVANCE_RIP();
6124 } IEM_MC_ENDIF();
6125 IEM_MC_END();
6126 return VINF_SUCCESS;
6127}
6128
6129
6130/** Opcode 0x7b. */
6131FNIEMOP_DEF(iemOp_jnp_Jb)
6132{
6133 IEMOP_MNEMONIC("jnp Jb");
6134 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6135 IEMOP_HLP_NO_LOCK_PREFIX();
6136 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6137
6138 IEM_MC_BEGIN(0, 0);
6139 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
6140 IEM_MC_ADVANCE_RIP();
6141 } IEM_MC_ELSE() {
6142 IEM_MC_REL_JMP_S8(i8Imm);
6143 } IEM_MC_ENDIF();
6144 IEM_MC_END();
6145 return VINF_SUCCESS;
6146}
6147
6148
6149/** Opcode 0x7c. */
6150FNIEMOP_DEF(iemOp_jl_Jb)
6151{
6152 IEMOP_MNEMONIC("jl/jnge Jb");
6153 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6154 IEMOP_HLP_NO_LOCK_PREFIX();
6155 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6156
6157 IEM_MC_BEGIN(0, 0);
6158 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
6159 IEM_MC_REL_JMP_S8(i8Imm);
6160 } IEM_MC_ELSE() {
6161 IEM_MC_ADVANCE_RIP();
6162 } IEM_MC_ENDIF();
6163 IEM_MC_END();
6164 return VINF_SUCCESS;
6165}
6166
6167
6168/** Opcode 0x7d. */
6169FNIEMOP_DEF(iemOp_jnl_Jb)
6170{
6171 IEMOP_MNEMONIC("jnl/jge Jb");
6172 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6173 IEMOP_HLP_NO_LOCK_PREFIX();
6174 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6175
6176 IEM_MC_BEGIN(0, 0);
6177 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
6178 IEM_MC_ADVANCE_RIP();
6179 } IEM_MC_ELSE() {
6180 IEM_MC_REL_JMP_S8(i8Imm);
6181 } IEM_MC_ENDIF();
6182 IEM_MC_END();
6183 return VINF_SUCCESS;
6184}
6185
6186
6187/** Opcode 0x7e. */
6188FNIEMOP_DEF(iemOp_jle_Jb)
6189{
6190 IEMOP_MNEMONIC("jle/jng Jb");
6191 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6192 IEMOP_HLP_NO_LOCK_PREFIX();
6193 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6194
6195 IEM_MC_BEGIN(0, 0);
6196 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
6197 IEM_MC_REL_JMP_S8(i8Imm);
6198 } IEM_MC_ELSE() {
6199 IEM_MC_ADVANCE_RIP();
6200 } IEM_MC_ENDIF();
6201 IEM_MC_END();
6202 return VINF_SUCCESS;
6203}
6204
6205
6206/** Opcode 0x7f. */
6207FNIEMOP_DEF(iemOp_jnle_Jb)
6208{
6209 IEMOP_MNEMONIC("jnle/jg Jb");
6210 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6211 IEMOP_HLP_NO_LOCK_PREFIX();
6212 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6213
6214 IEM_MC_BEGIN(0, 0);
6215 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
6216 IEM_MC_ADVANCE_RIP();
6217 } IEM_MC_ELSE() {
6218 IEM_MC_REL_JMP_S8(i8Imm);
6219 } IEM_MC_ENDIF();
6220 IEM_MC_END();
6221 return VINF_SUCCESS;
6222}
6223
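/*
 * Aside on the 0x70..0x7f handlers above: they all follow the same pattern
 * and differ only in the EFLAGS predicate tested. A condensed sketch of
 * those predicates; hypothetical iemSketch_* helper, illustrative only:
 */
#if 0
static bool iemSketch_IsJccTaken(uint32_t fEFlags, uint8_t bOpcode)
{
    bool fTaken;
    switch ((bOpcode >> 1) & 7)
    {
        case 0: fTaken = RT_BOOL(fEFlags & X86_EFL_OF); break;                      /* jo/jno   */
        case 1: fTaken = RT_BOOL(fEFlags & X86_EFL_CF); break;                      /* jc/jnc   */
        case 2: fTaken = RT_BOOL(fEFlags & X86_EFL_ZF); break;                      /* je/jne   */
        case 3: fTaken = RT_BOOL(fEFlags & (X86_EFL_CF | X86_EFL_ZF)); break;       /* jbe/jnbe */
        case 4: fTaken = RT_BOOL(fEFlags & X86_EFL_SF); break;                      /* js/jns   */
        case 5: fTaken = RT_BOOL(fEFlags & X86_EFL_PF); break;                      /* jp/jnp   */
        case 6: fTaken = !(fEFlags & X86_EFL_SF) != !(fEFlags & X86_EFL_OF); break; /* jl/jnl   */
        default:
            fTaken = (fEFlags & X86_EFL_ZF)
                  || !(fEFlags & X86_EFL_SF) != !(fEFlags & X86_EFL_OF);            /* jle/jnle */
            break;
    }
    return (bOpcode & 1) ? !fTaken : fTaken; /* odd opcodes test the inverse condition */
}
#endif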
6224
6225/** Opcode 0x80. */
6226FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_80)
6227{
6228 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6229 IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Eb,Ib");
6230 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
6231
6232 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6233 {
6234 /* register target */
6235 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6236 IEMOP_HLP_NO_LOCK_PREFIX();
6237 IEM_MC_BEGIN(3, 0);
6238 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
6239 IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);
6240 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6241
6242 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6243 IEM_MC_REF_EFLAGS(pEFlags);
6244 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
6245
6246 IEM_MC_ADVANCE_RIP();
6247 IEM_MC_END();
6248 }
6249 else
6250 {
6251 /* memory target */
6252 uint32_t fAccess;
6253 if (pImpl->pfnLockedU8)
6254 fAccess = IEM_ACCESS_DATA_RW;
6255 else
6256 { /* CMP */
6257 IEMOP_HLP_NO_LOCK_PREFIX();
6258 fAccess = IEM_ACCESS_DATA_R;
6259 }
6260 IEM_MC_BEGIN(3, 2);
6261 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
6262 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6263 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6264
6265 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6266 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6267 IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);
6268
6269 IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6270 IEM_MC_FETCH_EFLAGS(EFlags);
6271 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6272 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
6273 else
6274 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);
6275
6276 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
6277 IEM_MC_COMMIT_EFLAGS(EFlags);
6278 IEM_MC_ADVANCE_RIP();
6279 IEM_MC_END();
6280 }
6281 return VINF_SUCCESS;
6282}
6283
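/*
 * Aside on the IEMOP_MNEMONIC2 expression above: the eight group-1
 * mnemonics are packed into a single string with a fixed stride of four
 * bytes, so adding reg*4 to the string yields the right NUL-terminated
 * name. Illustrative only (uReg stands in for the ModR/M reg field, 0..7):
 */
#if 0
    static const char s_szGrp1[] = "add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp";
    const char *pszMnemonic = s_szGrp1 + uReg * 4;
#endif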
6284
6285/** Opcode 0x81. */
6286FNIEMOP_DEF(iemOp_Grp1_Ev_Iz)
6287{
6288 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6289 IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Iz");
6290 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
6291
6292 switch (pIemCpu->enmEffOpSize)
6293 {
6294 case IEMMODE_16BIT:
6295 {
6296 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6297 {
6298 /* register target */
6299 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
6300 IEMOP_HLP_NO_LOCK_PREFIX();
6301 IEM_MC_BEGIN(3, 0);
6302 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6303 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u16Imm, 1);
6304 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6305
6306 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6307 IEM_MC_REF_EFLAGS(pEFlags);
6308 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6309
6310 IEM_MC_ADVANCE_RIP();
6311 IEM_MC_END();
6312 }
6313 else
6314 {
6315 /* memory target */
6316 uint32_t fAccess;
6317 if (pImpl->pfnLockedU16)
6318 fAccess = IEM_ACCESS_DATA_RW;
6319 else
6320 { /* CMP */
6321 IEMOP_HLP_NO_LOCK_PREFIX();
6322 fAccess = IEM_ACCESS_DATA_R;
6323 }
6324 IEM_MC_BEGIN(3, 2);
6325 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6326 IEM_MC_ARG(uint16_t, u16Src, 1);
6327 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6328 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6329
6330 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6331 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
6332 IEM_MC_ASSIGN(u16Src, u16Imm);
6333 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6334 IEM_MC_FETCH_EFLAGS(EFlags);
6335 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6336 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6337 else
6338 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
6339
6340 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
6341 IEM_MC_COMMIT_EFLAGS(EFlags);
6342 IEM_MC_ADVANCE_RIP();
6343 IEM_MC_END();
6344 }
6345 break;
6346 }
6347
6348 case IEMMODE_32BIT:
6349 {
6350 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6351 {
6352 /* register target */
6353 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
6354 IEMOP_HLP_NO_LOCK_PREFIX();
6355 IEM_MC_BEGIN(3, 0);
6356 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6357 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u32Imm, 1);
6358 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6359
6360 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6361 IEM_MC_REF_EFLAGS(pEFlags);
6362 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6363
6364 IEM_MC_ADVANCE_RIP();
6365 IEM_MC_END();
6366 }
6367 else
6368 {
6369 /* memory target */
6370 uint32_t fAccess;
6371 if (pImpl->pfnLockedU32)
6372 fAccess = IEM_ACCESS_DATA_RW;
6373 else
6374 { /* CMP */
6375 IEMOP_HLP_NO_LOCK_PREFIX();
6376 fAccess = IEM_ACCESS_DATA_R;
6377 }
6378 IEM_MC_BEGIN(3, 2);
6379 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6380 IEM_MC_ARG(uint32_t, u32Src, 1);
6381 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6382 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6383
6384 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6385 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
6386 IEM_MC_ASSIGN(u32Src, u32Imm);
6387 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6388 IEM_MC_FETCH_EFLAGS(EFlags);
6389 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6390 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6391 else
6392 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
6393
6394 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
6395 IEM_MC_COMMIT_EFLAGS(EFlags);
6396 IEM_MC_ADVANCE_RIP();
6397 IEM_MC_END();
6398 }
6399 break;
6400 }
6401
6402 case IEMMODE_64BIT:
6403 {
6404 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6405 {
6406 /* register target */
6407 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
6408 IEMOP_HLP_NO_LOCK_PREFIX();
6409 IEM_MC_BEGIN(3, 0);
6410 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6411 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u64Imm, 1);
6412 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6413
6414 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6415 IEM_MC_REF_EFLAGS(pEFlags);
6416 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6417
6418 IEM_MC_ADVANCE_RIP();
6419 IEM_MC_END();
6420 }
6421 else
6422 {
6423 /* memory target */
6424 uint32_t fAccess;
6425 if (pImpl->pfnLockedU64)
6426 fAccess = IEM_ACCESS_DATA_RW;
6427 else
6428 { /* CMP */
6429 IEMOP_HLP_NO_LOCK_PREFIX();
6430 fAccess = IEM_ACCESS_DATA_R;
6431 }
6432 IEM_MC_BEGIN(3, 2);
6433 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6434 IEM_MC_ARG(uint64_t, u64Src, 1);
6435 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6436 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6437
6438 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6439 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
6440 IEM_MC_ASSIGN(u64Src, u64Imm);
6441 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6442 IEM_MC_FETCH_EFLAGS(EFlags);
6443 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6444 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6445 else
6446 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
6447
6448 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
6449 IEM_MC_COMMIT_EFLAGS(EFlags);
6450 IEM_MC_ADVANCE_RIP();
6451 IEM_MC_END();
6452 }
6453 break;
6454 }
6455 }
6456 return VINF_SUCCESS;
6457}
6458
6459
6460/** Opcode 0x82. */
6461FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_82)
6462{
6463 IEMOP_HLP_NO_64BIT(); /** @todo do we need to decode the whole instruction or is this ok? */
6464 return FNIEMOP_CALL(iemOp_Grp1_Eb_Ib_80);
6465}
6466
6467
6468/** Opcode 0x83. */
6469FNIEMOP_DEF(iemOp_Grp1_Ev_Ib)
6470{
6471 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6472 IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Ib");
6473 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
6474
6475 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6476 {
6477 /*
6478 * Register target
6479 */
6480 IEMOP_HLP_NO_LOCK_PREFIX();
6481 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6482 switch (pIemCpu->enmEffOpSize)
6483 {
6484 case IEMMODE_16BIT:
6485 {
6486 IEM_MC_BEGIN(3, 0);
6487 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6488 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ (int8_t)u8Imm,1);
6489 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6490
6491 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6492 IEM_MC_REF_EFLAGS(pEFlags);
6493 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6494
6495 IEM_MC_ADVANCE_RIP();
6496 IEM_MC_END();
6497 break;
6498 }
6499
6500 case IEMMODE_32BIT:
6501 {
6502 IEM_MC_BEGIN(3, 0);
6503 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6504 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ (int8_t)u8Imm,1);
6505 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6506
6507 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6508 IEM_MC_REF_EFLAGS(pEFlags);
6509 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6510
6511 IEM_MC_ADVANCE_RIP();
6512 IEM_MC_END();
6513 break;
6514 }
6515
6516 case IEMMODE_64BIT:
6517 {
6518 IEM_MC_BEGIN(3, 0);
6519 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6520 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ (int8_t)u8Imm,1);
6521 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6522
6523 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6524 IEM_MC_REF_EFLAGS(pEFlags);
6525 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6526
6527 IEM_MC_ADVANCE_RIP();
6528 IEM_MC_END();
6529 break;
6530 }
6531 }
6532 }
6533 else
6534 {
6535 /*
6536 * Memory target.
6537 */
6538 uint32_t fAccess;
6539 if (pImpl->pfnLockedU16)
6540 fAccess = IEM_ACCESS_DATA_RW;
6541 else
6542 { /* CMP */
6543 IEMOP_HLP_NO_LOCK_PREFIX();
6544 fAccess = IEM_ACCESS_DATA_R;
6545 }
6546
6547 switch (pIemCpu->enmEffOpSize)
6548 {
6549 case IEMMODE_16BIT:
6550 {
6551 IEM_MC_BEGIN(3, 2);
6552 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6553 IEM_MC_ARG(uint16_t, u16Src, 1);
6554 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6555 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6556
6557 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6558 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6559 IEM_MC_ASSIGN(u16Src, (int8_t)u8Imm);
6560 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6561 IEM_MC_FETCH_EFLAGS(EFlags);
6562 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6563 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6564 else
6565 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
6566
6567 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
6568 IEM_MC_COMMIT_EFLAGS(EFlags);
6569 IEM_MC_ADVANCE_RIP();
6570 IEM_MC_END();
6571 break;
6572 }
6573
6574 case IEMMODE_32BIT:
6575 {
6576 IEM_MC_BEGIN(3, 2);
6577 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6578 IEM_MC_ARG(uint32_t, u32Src, 1);
6579 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6580 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6581
6582 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6583 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6584 IEM_MC_ASSIGN(u32Src, (int8_t)u8Imm);
6585 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6586 IEM_MC_FETCH_EFLAGS(EFlags);
6587 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6588 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6589 else
6590 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
6591
6592 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
6593 IEM_MC_COMMIT_EFLAGS(EFlags);
6594 IEM_MC_ADVANCE_RIP();
6595 IEM_MC_END();
6596 break;
6597 }
6598
6599 case IEMMODE_64BIT:
6600 {
6601 IEM_MC_BEGIN(3, 2);
6602 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6603 IEM_MC_ARG(uint64_t, u64Src, 1);
6604 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6605 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6606
6607 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6608 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6609 IEM_MC_ASSIGN(u64Src, (int8_t)u8Imm);
6610 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6611 IEM_MC_FETCH_EFLAGS(EFlags);
6612 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6613 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6614 else
6615 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
6616
6617 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
6618 IEM_MC_COMMIT_EFLAGS(EFlags);
6619 IEM_MC_ADVANCE_RIP();
6620 IEM_MC_END();
6621 break;
6622 }
6623 }
6624 }
6625 return VINF_SUCCESS;
6626}
6627
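/*
 * Note: opcode 0x83 sign-extends its byte immediate to the effective
 * operand size, which is what the (int8_t) casts above accomplish. E.g.
 * "add eax, -1" assembles to 83 C0 FF and uses 0xffffffff as the source.
 * Minimal sketch, illustrative only:
 */
#if 0
    uint8_t  const u8Imm  = 0xff;
    uint32_t const u32Src = (uint32_t)(int32_t)(int8_t)u8Imm; /* 0xffffffff */
#endif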
6628
6629/** Opcode 0x84. */
6630FNIEMOP_DEF(iemOp_test_Eb_Gb)
6631{
6632 IEMOP_MNEMONIC("test Eb,Gb");
6633 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
6634 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
6635 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_test);
6636}
6637
6638
6639/** Opcode 0x85. */
6640FNIEMOP_DEF(iemOp_test_Ev_Gv)
6641{
6642 IEMOP_MNEMONIC("test Ev,Gv");
6643 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
6644 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
6645 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_test);
6646}
6647
6648
6649/** Opcode 0x86. */
6650FNIEMOP_DEF(iemOp_xchg_Eb_Gb)
6651{
6652 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6653 IEMOP_MNEMONIC("xchg Eb,Gb");
6654
6655 /*
6656 * If rm is denoting a register, no more instruction bytes.
6657 */
6658 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6659 {
6660 IEMOP_HLP_NO_LOCK_PREFIX();
6661
6662 IEM_MC_BEGIN(0, 2);
6663 IEM_MC_LOCAL(uint8_t, uTmp1);
6664 IEM_MC_LOCAL(uint8_t, uTmp2);
6665
6666 IEM_MC_FETCH_GREG_U8(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6667 IEM_MC_FETCH_GREG_U8(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6668 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
6669 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
6670
6671 IEM_MC_ADVANCE_RIP();
6672 IEM_MC_END();
6673 }
6674 else
6675 {
6676 /*
6677 * We're accessing memory.
6678 */
6679 IEM_MC_BEGIN(2, 2);
6680 IEM_MC_ARG(uint8_t *, pu8Mem, 0);
6681 IEM_MC_ARG(uint8_t *, pu8Reg, 1);
6682 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6683
6684 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6685 IEM_MC_MEM_MAP(pu8Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6686 IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6687 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u8, pu8Mem, pu8Reg);
6688 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Mem, IEM_ACCESS_DATA_RW);
6689
6690 IEM_MC_ADVANCE_RIP();
6691 IEM_MC_END();
6692 }
6693 return VINF_SUCCESS;
6694}
6695
6696
6697/** Opcode 0x87. */
6698FNIEMOP_DEF(iemOp_xchg_Ev_Gv)
6699{
6700 IEMOP_MNEMONIC("xchg Ev,Gv");
6701 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6702
6703 /*
6704 * If rm is denoting a register, no more instruction bytes.
6705 */
6706 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6707 {
6708 IEMOP_HLP_NO_LOCK_PREFIX();
6709
6710 switch (pIemCpu->enmEffOpSize)
6711 {
6712 case IEMMODE_16BIT:
6713 IEM_MC_BEGIN(0, 2);
6714 IEM_MC_LOCAL(uint16_t, uTmp1);
6715 IEM_MC_LOCAL(uint16_t, uTmp2);
6716
6717 IEM_MC_FETCH_GREG_U16(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6718 IEM_MC_FETCH_GREG_U16(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6719 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
6720 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
6721
6722 IEM_MC_ADVANCE_RIP();
6723 IEM_MC_END();
6724 return VINF_SUCCESS;
6725
6726 case IEMMODE_32BIT:
6727 IEM_MC_BEGIN(0, 2);
6728 IEM_MC_LOCAL(uint32_t, uTmp1);
6729 IEM_MC_LOCAL(uint32_t, uTmp2);
6730
6731 IEM_MC_FETCH_GREG_U32(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6732 IEM_MC_FETCH_GREG_U32(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6733 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
6734 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
6735
6736 IEM_MC_ADVANCE_RIP();
6737 IEM_MC_END();
6738 return VINF_SUCCESS;
6739
6740 case IEMMODE_64BIT:
6741 IEM_MC_BEGIN(0, 2);
6742 IEM_MC_LOCAL(uint64_t, uTmp1);
6743 IEM_MC_LOCAL(uint64_t, uTmp2);
6744
6745 IEM_MC_FETCH_GREG_U64(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6746 IEM_MC_FETCH_GREG_U64(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6747 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
6748 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
6749
6750 IEM_MC_ADVANCE_RIP();
6751 IEM_MC_END();
6752 return VINF_SUCCESS;
6753
6754 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6755 }
6756 }
6757 else
6758 {
6759 /*
6760 * We're accessing memory.
6761 */
6762 switch (pIemCpu->enmEffOpSize)
6763 {
6764 case IEMMODE_16BIT:
6765 IEM_MC_BEGIN(2, 2);
6766 IEM_MC_ARG(uint16_t *, pu16Mem, 0);
6767 IEM_MC_ARG(uint16_t *, pu16Reg, 1);
6768 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6769
6770 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6771 IEM_MC_MEM_MAP(pu16Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6772 IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6773 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u16, pu16Mem, pu16Reg);
6774 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Mem, IEM_ACCESS_DATA_RW);
6775
6776 IEM_MC_ADVANCE_RIP();
6777 IEM_MC_END();
6778 return VINF_SUCCESS;
6779
6780 case IEMMODE_32BIT:
6781 IEM_MC_BEGIN(2, 2);
6782 IEM_MC_ARG(uint32_t *, pu32Mem, 0);
6783 IEM_MC_ARG(uint32_t *, pu32Reg, 1);
6784 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6785
6786 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6787 IEM_MC_MEM_MAP(pu32Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6788 IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6789 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u32, pu32Mem, pu32Reg);
6790 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Mem, IEM_ACCESS_DATA_RW);
6791
6792 IEM_MC_ADVANCE_RIP();
6793 IEM_MC_END();
6794 return VINF_SUCCESS;
6795
6796 case IEMMODE_64BIT:
6797 IEM_MC_BEGIN(2, 2);
6798 IEM_MC_ARG(uint64_t *, pu64Mem, 0);
6799 IEM_MC_ARG(uint64_t *, pu64Reg, 1);
6800 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6801
6802 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6803 IEM_MC_MEM_MAP(pu64Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6804 IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6805 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u64, pu64Mem, pu64Reg);
6806 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Mem, IEM_ACCESS_DATA_RW);
6807
6808 IEM_MC_ADVANCE_RIP();
6809 IEM_MC_END();
6810 return VINF_SUCCESS;
6811
6812 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6813 }
6814 }
6815}
6816
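/*
 * Note on the memory forms above: XCHG with a memory operand is implicitly
 * locked on x86 whether or not a LOCK prefix is present, which is why only
 * the register-register paths call IEMOP_HLP_NO_LOCK_PREFIX(). A minimal
 * sketch of the swap the iemAImpl_xchg_u* workers perform (sans atomicity);
 * hypothetical iemSketch_* helper, illustrative only:
 */
#if 0
static void iemSketch_XchgU16(uint16_t *pu16Mem, uint16_t *pu16Reg)
{
    uint16_t const uTmp = *pu16Mem;
    *pu16Mem = *pu16Reg;
    *pu16Reg = uTmp;
}
#endif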
6817
6818/** Opcode 0x88. */
6819FNIEMOP_DEF(iemOp_mov_Eb_Gb)
6820{
6821 IEMOP_MNEMONIC("mov Eb,Gb");
6822
6823 uint8_t bRm;
6824 IEM_OPCODE_GET_NEXT_U8(&bRm);
6825 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
6826
6827 /*
6828 * If rm is denoting a register, no more instruction bytes.
6829 */
6830 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6831 {
6832 IEM_MC_BEGIN(0, 1);
6833 IEM_MC_LOCAL(uint8_t, u8Value);
6834 IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6835 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Value);
6836 IEM_MC_ADVANCE_RIP();
6837 IEM_MC_END();
6838 }
6839 else
6840 {
6841 /*
6842 * We're writing a register to memory.
6843 */
6844 IEM_MC_BEGIN(0, 2);
6845 IEM_MC_LOCAL(uint8_t, u8Value);
6846 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6847 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6848 IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6849 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Value);
6850 IEM_MC_ADVANCE_RIP();
6851 IEM_MC_END();
6852 }
6853 return VINF_SUCCESS;
6854
6855}
6856
6857
6858/** Opcode 0x89. */
6859FNIEMOP_DEF(iemOp_mov_Ev_Gv)
6860{
6861 IEMOP_MNEMONIC("mov Ev,Gv");
6862
6863 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6864 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
6865
6866 /*
6867 * If rm is denoting a register, no more instruction bytes.
6868 */
6869 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6870 {
6871 switch (pIemCpu->enmEffOpSize)
6872 {
6873 case IEMMODE_16BIT:
6874 IEM_MC_BEGIN(0, 1);
6875 IEM_MC_LOCAL(uint16_t, u16Value);
6876 IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6877 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
6878 IEM_MC_ADVANCE_RIP();
6879 IEM_MC_END();
6880 break;
6881
6882 case IEMMODE_32BIT:
6883 IEM_MC_BEGIN(0, 1);
6884 IEM_MC_LOCAL(uint32_t, u32Value);
6885 IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6886 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
6887 IEM_MC_ADVANCE_RIP();
6888 IEM_MC_END();
6889 break;
6890
6891 case IEMMODE_64BIT:
6892 IEM_MC_BEGIN(0, 1);
6893 IEM_MC_LOCAL(uint64_t, u64Value);
6894 IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6895 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
6896 IEM_MC_ADVANCE_RIP();
6897 IEM_MC_END();
6898 break;
6899 }
6900 }
6901 else
6902 {
6903 /*
6904 * We're writing a register to memory.
6905 */
6906 switch (pIemCpu->enmEffOpSize)
6907 {
6908 case IEMMODE_16BIT:
6909 IEM_MC_BEGIN(0, 2);
6910 IEM_MC_LOCAL(uint16_t, u16Value);
6911 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6912 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6913 IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6914 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
6915 IEM_MC_ADVANCE_RIP();
6916 IEM_MC_END();
6917 break;
6918
6919 case IEMMODE_32BIT:
6920 IEM_MC_BEGIN(0, 2);
6921 IEM_MC_LOCAL(uint32_t, u32Value);
6922 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6923 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6924 IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6925 IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Value);
6926 IEM_MC_ADVANCE_RIP();
6927 IEM_MC_END();
6928 break;
6929
6930 case IEMMODE_64BIT:
6931 IEM_MC_BEGIN(0, 2);
6932 IEM_MC_LOCAL(uint64_t, u64Value);
6933 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6934 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6935 IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6936 IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Value);
6937 IEM_MC_ADVANCE_RIP();
6938 IEM_MC_END();
6939 break;
6940 }
6941 }
6942 return VINF_SUCCESS;
6943}
6944
6945
6946/** Opcode 0x8a. */
6947FNIEMOP_DEF(iemOp_mov_Gb_Eb)
6948{
6949 IEMOP_MNEMONIC("mov Gb,Eb");
6950
6951 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6952 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
6953
6954 /*
6955 * If rm is denoting a register, no more instruction bytes.
6956 */
6957 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6958 {
6959 IEM_MC_BEGIN(0, 1);
6960 IEM_MC_LOCAL(uint8_t, u8Value);
6961 IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6962 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
6963 IEM_MC_ADVANCE_RIP();
6964 IEM_MC_END();
6965 }
6966 else
6967 {
6968 /*
6969 * We're loading a register from memory.
6970 */
6971 IEM_MC_BEGIN(0, 2);
6972 IEM_MC_LOCAL(uint8_t, u8Value);
6973 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6974 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6975 IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
6976 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
6977 IEM_MC_ADVANCE_RIP();
6978 IEM_MC_END();
6979 }
6980 return VINF_SUCCESS;
6981}
6982
6983
6984/** Opcode 0x8b. */
6985FNIEMOP_DEF(iemOp_mov_Gv_Ev)
6986{
6987 IEMOP_MNEMONIC("mov Gv,Ev");
6988
6989 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6990 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
6991
6992 /*
6993 * If rm is denoting a register, no more instruction bytes.
6994 */
6995 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6996 {
6997 switch (pIemCpu->enmEffOpSize)
6998 {
6999 case IEMMODE_16BIT:
7000 IEM_MC_BEGIN(0, 1);
7001 IEM_MC_LOCAL(uint16_t, u16Value);
7002 IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7003 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
7004 IEM_MC_ADVANCE_RIP();
7005 IEM_MC_END();
7006 break;
7007
7008 case IEMMODE_32BIT:
7009 IEM_MC_BEGIN(0, 1);
7010 IEM_MC_LOCAL(uint32_t, u32Value);
7011 IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7012 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
7013 IEM_MC_ADVANCE_RIP();
7014 IEM_MC_END();
7015 break;
7016
7017 case IEMMODE_64BIT:
7018 IEM_MC_BEGIN(0, 1);
7019 IEM_MC_LOCAL(uint64_t, u64Value);
7020 IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7021 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
7022 IEM_MC_ADVANCE_RIP();
7023 IEM_MC_END();
7024 break;
7025 }
7026 }
7027 else
7028 {
7029 /*
7030 * We're loading a register from memory.
7031 */
7032 switch (pIemCpu->enmEffOpSize)
7033 {
7034 case IEMMODE_16BIT:
7035 IEM_MC_BEGIN(0, 2);
7036 IEM_MC_LOCAL(uint16_t, u16Value);
7037 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7038 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7039 IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
7040 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
7041 IEM_MC_ADVANCE_RIP();
7042 IEM_MC_END();
7043 break;
7044
7045 case IEMMODE_32BIT:
7046 IEM_MC_BEGIN(0, 2);
7047 IEM_MC_LOCAL(uint32_t, u32Value);
7048 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7049 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7050 IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
7051 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
7052 IEM_MC_ADVANCE_RIP();
7053 IEM_MC_END();
7054 break;
7055
7056 case IEMMODE_64BIT:
7057 IEM_MC_BEGIN(0, 2);
7058 IEM_MC_LOCAL(uint64_t, u64Value);
7059 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7060 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7061 IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
7062 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
7063 IEM_MC_ADVANCE_RIP();
7064 IEM_MC_END();
7065 break;
7066 }
7067 }
7068 return VINF_SUCCESS;
7069}
7070
7071
7072/** Opcode 0x8c. */
7073FNIEMOP_DEF(iemOp_mov_Ev_Sw)
7074{
7075 IEMOP_MNEMONIC("mov Ev,Sw");
7076
7077 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7078 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7079
7080 /*
7081 * Check that the source segment register exists. The REX.R prefix is ignored.
7082 */
7083 uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
7084 if ( iSegReg > X86_SREG_GS)
7085 return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7086
7087 /*
7088 * If rm is denoting a register, no more instruction bytes.
7089 * In that case, the operand size is respected and the upper bits are
7090 * cleared (starting with some Pentium).
7091 */
7092 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7093 {
7094 switch (pIemCpu->enmEffOpSize)
7095 {
7096 case IEMMODE_16BIT:
7097 IEM_MC_BEGIN(0, 1);
7098 IEM_MC_LOCAL(uint16_t, u16Value);
7099 IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
7100 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
7101 IEM_MC_ADVANCE_RIP();
7102 IEM_MC_END();
7103 break;
7104
7105 case IEMMODE_32BIT:
7106 IEM_MC_BEGIN(0, 1);
7107 IEM_MC_LOCAL(uint32_t, u32Value);
7108 IEM_MC_FETCH_SREG_ZX_U32(u32Value, iSegReg);
7109 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
7110 IEM_MC_ADVANCE_RIP();
7111 IEM_MC_END();
7112 break;
7113
7114 case IEMMODE_64BIT:
7115 IEM_MC_BEGIN(0, 1);
7116 IEM_MC_LOCAL(uint64_t, u64Value);
7117 IEM_MC_FETCH_SREG_ZX_U64(u64Value, iSegReg);
7118 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
7119 IEM_MC_ADVANCE_RIP();
7120 IEM_MC_END();
7121 break;
7122 }
7123 }
7124 else
7125 {
7126 /*
7127 * We're saving the register to memory. The access is word sized
7128 * regardless of operand size prefixes.
7129 */
7130#if 0 /* not necessary */
7131 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
7132#endif
7133 IEM_MC_BEGIN(0, 2);
7134 IEM_MC_LOCAL(uint16_t, u16Value);
7135 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7136 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7137 IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
7138 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
7139 IEM_MC_ADVANCE_RIP();
7140 IEM_MC_END();
7141 }
7142 return VINF_SUCCESS;
7143}
7144
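/*
 * Example of the register-form behaviour described above: with a 32-bit
 * operand size, "mov eax, ds" zero-extends the 16-bit selector into EAX on
 * the CPUs that clear the upper bits. Minimal sketch with a hypothetical
 * uSelDs value, illustrative only:
 */
#if 0
    uint16_t const uSelDs = 0x0023;
    uint32_t const u32Eax = (uint32_t)uSelDs; /* upper 16 bits cleared */
#endif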
7145
7146
7147
7148/** Opcode 0x8d. */
7149FNIEMOP_DEF(iemOp_lea_Gv_M)
7150{
7151 IEMOP_MNEMONIC("lea Gv,M");
7152 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7153 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7154 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7155 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); /* no register form */
7156
7157 switch (pIemCpu->enmEffOpSize)
7158 {
7159 case IEMMODE_16BIT:
7160 IEM_MC_BEGIN(0, 2);
7161 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
7162 IEM_MC_LOCAL(uint16_t, u16Cast);
7163 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
7164 IEM_MC_ASSIGN_TO_SMALLER(u16Cast, GCPtrEffSrc);
7165 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Cast);
7166 IEM_MC_ADVANCE_RIP();
7167 IEM_MC_END();
7168 return VINF_SUCCESS;
7169
7170 case IEMMODE_32BIT:
7171 IEM_MC_BEGIN(0, 2);
7172 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
7173 IEM_MC_LOCAL(uint32_t, u32Cast);
7174 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
7175 IEM_MC_ASSIGN_TO_SMALLER(u32Cast, GCPtrEffSrc);
7176 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Cast);
7177 IEM_MC_ADVANCE_RIP();
7178 IEM_MC_END();
7179 return VINF_SUCCESS;
7180
7181 case IEMMODE_64BIT:
7182 IEM_MC_BEGIN(0, 1);
7183 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
7184 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
7185 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, GCPtrEffSrc);
7186 IEM_MC_ADVANCE_RIP();
7187 IEM_MC_END();
7188 return VINF_SUCCESS;
7189 }
7190 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
7191}
7192
7193
7194/** Opcode 0x8e. */
7195FNIEMOP_DEF(iemOp_mov_Sw_Ev)
7196{
7197 IEMOP_MNEMONIC("mov Sw,Ev");
7198
7199 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7200 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7201
7202 /*
7203 * The practical operand size is 16-bit.
7204 */
7205#if 0 /* not necessary */
7206 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
7207#endif
7208
7209 /*
7210 * Check that the destination register exists and can be used with this
7211 * instruction. The REX.R prefix is ignored.
7212 */
7213 uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
7214 if ( iSegReg == X86_SREG_CS
7215 || iSegReg > X86_SREG_GS)
7216 return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7217
7218 /*
7219 * If rm is denoting a register, no more instruction bytes.
7220 */
7221 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7222 {
7223 IEM_MC_BEGIN(2, 0);
7224 IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
7225 IEM_MC_ARG(uint16_t, u16Value, 1);
7226 IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7227 IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
7228 IEM_MC_END();
7229 }
7230 else
7231 {
7232 /*
7233 * We're loading the register from memory. The access is word sized
7234 * regardless of operand size prefixes.
7235 */
7236 IEM_MC_BEGIN(2, 1);
7237 IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
7238 IEM_MC_ARG(uint16_t, u16Value, 1);
7239 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7240 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7241 IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
7242 IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
7243 IEM_MC_END();
7244 }
7245 return VINF_SUCCESS;
7246}
7247
7248
7249/** Opcode 0x8f. */
7250FNIEMOP_DEF(iemOp_pop_Ev)
7251{
7252 /* This bugger is rather annoying as it requires rSP to be updated before
7253 doing the effective address calculations. Will eventually require a
7254 split between the R/M+SIB decoding and the effective address
7255 calculation - which is something that is required for any attempt at
7256 reusing this code for a recompiler. It may also be good to have if we
7257 need to delay #UD exception caused by invalid lock prefixes.
7258
7259 For now, we'll do a mostly safe interpreter-only implementation here. */
7260 /** @todo What's the deal with the 'reg' field and pop Ev? Ignoring it for
7261 * now until tests show it's checked. */
7262 IEMOP_MNEMONIC("pop Ev");
7263 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7264 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7265
7266 /* Register access is relatively easy and can share code. */
7267 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7268 return FNIEMOP_CALL_1(iemOpCommonPopGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7269
7270 /*
7271 * Memory target.
7272 *
7273 * Intel says that RSP is incremented before it's used in any effective
7274 * address calculations. This means some serious extra annoyance here since
7275 * we decode and calculate the effective address in one step and like to
7276 * delay committing registers till everything is done.
7277 *
7278 * So, we'll decode and calculate the effective address twice. This will
7279 * require some recoding if turned into a recompiler.
7280 */
7281 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* The common code does this differently. */
7282
7283#ifndef TST_IEM_CHECK_MC
7284 /* Calc effective address with modified ESP. */
7285 uint8_t const offOpcodeSaved = pIemCpu->offOpcode;
7286 RTGCPTR GCPtrEff;
7287 VBOXSTRICTRC rcStrict;
7288 rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, &GCPtrEff);
7289 if (rcStrict != VINF_SUCCESS)
7290 return rcStrict;
7291 pIemCpu->offOpcode = offOpcodeSaved;
7292
7293 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7294 uint64_t const RspSaved = pCtx->rsp;
7295 switch (pIemCpu->enmEffOpSize)
7296 {
7297 case IEMMODE_16BIT: iemRegAddToRsp(pCtx, 2); break;
7298 case IEMMODE_32BIT: iemRegAddToRsp(pCtx, 4); break;
7299 case IEMMODE_64BIT: iemRegAddToRsp(pCtx, 8); break;
7300 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7301 }
7302 rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, &GCPtrEff);
7303 Assert(rcStrict == VINF_SUCCESS);
7304 pCtx->rsp = RspSaved;
7305
7306 /* Perform the operation - this should be CImpl. */
7307 RTUINT64U TmpRsp;
7308 TmpRsp.u = pCtx->rsp;
7309 switch (pIemCpu->enmEffOpSize)
7310 {
7311 case IEMMODE_16BIT:
7312 {
7313 uint16_t u16Value;
7314 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
7315 if (rcStrict == VINF_SUCCESS)
7316 rcStrict = iemMemStoreDataU16(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u16Value);
7317 break;
7318 }
7319
7320 case IEMMODE_32BIT:
7321 {
7322 uint32_t u32Value;
7323 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
7324 if (rcStrict == VINF_SUCCESS)
7325 rcStrict = iemMemStoreDataU32(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u32Value);
7326 break;
7327 }
7328
7329 case IEMMODE_64BIT:
7330 {
7331 uint64_t u64Value;
7332 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
7333 if (rcStrict == VINF_SUCCESS)
7334 rcStrict = iemMemStoreDataU64(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u64Value);
7335 break;
7336 }
7337
7338 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7339 }
7340 if (rcStrict == VINF_SUCCESS)
7341 {
7342 pCtx->rsp = TmpRsp.u;
7343 iemRegUpdateRip(pIemCpu);
7344 }
7345 return rcStrict;
7346
7347#else
7348 return VERR_NOT_IMPLEMENTED;
7349#endif
7350}
7351
7352
7353/**
7354 * Common 'xchg reg,rAX' helper.
7355 */
7356FNIEMOP_DEF_1(iemOpCommonXchgGRegRax, uint8_t, iReg)
7357{
7358 IEMOP_HLP_NO_LOCK_PREFIX();
7359
7360 iReg |= pIemCpu->uRexB;
7361 switch (pIemCpu->enmEffOpSize)
7362 {
7363 case IEMMODE_16BIT:
7364 IEM_MC_BEGIN(0, 2);
7365 IEM_MC_LOCAL(uint16_t, u16Tmp1);
7366 IEM_MC_LOCAL(uint16_t, u16Tmp2);
7367 IEM_MC_FETCH_GREG_U16(u16Tmp1, iReg);
7368 IEM_MC_FETCH_GREG_U16(u16Tmp2, X86_GREG_xAX);
7369 IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp1);
7370 IEM_MC_STORE_GREG_U16(iReg, u16Tmp2);
7371 IEM_MC_ADVANCE_RIP();
7372 IEM_MC_END();
7373 return VINF_SUCCESS;
7374
7375 case IEMMODE_32BIT:
7376 IEM_MC_BEGIN(0, 2);
7377 IEM_MC_LOCAL(uint32_t, u32Tmp1);
7378 IEM_MC_LOCAL(uint32_t, u32Tmp2);
7379 IEM_MC_FETCH_GREG_U32(u32Tmp1, iReg);
7380 IEM_MC_FETCH_GREG_U32(u32Tmp2, X86_GREG_xAX);
7381 IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp1);
7382 IEM_MC_STORE_GREG_U32(iReg, u32Tmp2);
7383 IEM_MC_ADVANCE_RIP();
7384 IEM_MC_END();
7385 return VINF_SUCCESS;
7386
7387 case IEMMODE_64BIT:
7388 IEM_MC_BEGIN(0, 2);
7389 IEM_MC_LOCAL(uint64_t, u64Tmp1);
7390 IEM_MC_LOCAL(uint64_t, u64Tmp2);
7391 IEM_MC_FETCH_GREG_U64(u64Tmp1, iReg);
7392 IEM_MC_FETCH_GREG_U64(u64Tmp2, X86_GREG_xAX);
7393 IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp1);
7394 IEM_MC_STORE_GREG_U64(iReg, u64Tmp2);
7395 IEM_MC_ADVANCE_RIP();
7396 IEM_MC_END();
7397 return VINF_SUCCESS;
7398
7399 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7400 }
7401}
7402
7403
7404/** Opcode 0x90. */
7405FNIEMOP_DEF(iemOp_nop)
7406{
7407 /* R8/R8D and RAX/EAX can be exchanged. */
7408 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_B)
7409 {
7410 IEMOP_MNEMONIC("xchg r8,rAX");
7411 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xAX);
7412 }
7413
7414 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
7415 IEMOP_MNEMONIC("pause");
7416 else
7417 IEMOP_MNEMONIC("nop");
7418 IEM_MC_BEGIN(0, 0);
7419 IEM_MC_ADVANCE_RIP();
7420 IEM_MC_END();
7421 return VINF_SUCCESS;
7422}
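
/* Encoding note (illustrative, not from the original source): plain 90 is NOP,
 F3 90 is PAUSE, and 41 90 (REX.B + 0x90) is "xchg r8,rax" - only for 0x90
 does REX.B change which instruction executes rather than just which register
 is addressed. */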
7423
7424
7425/** Opcode 0x91. */
7426FNIEMOP_DEF(iemOp_xchg_eCX_eAX)
7427{
7428 IEMOP_MNEMONIC("xchg rCX,rAX");
7429 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xCX);
7430}
7431
7432
7433/** Opcode 0x92. */
7434FNIEMOP_DEF(iemOp_xchg_eDX_eAX)
7435{
7436 IEMOP_MNEMONIC("xchg rDX,rAX");
7437 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDX);
7438}
7439
7440
7441/** Opcode 0x93. */
7442FNIEMOP_DEF(iemOp_xchg_eBX_eAX)
7443{
7444 IEMOP_MNEMONIC("xchg rBX,rAX");
7445 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBX);
7446}
7447
7448
7449/** Opcode 0x94. */
7450FNIEMOP_DEF(iemOp_xchg_eSP_eAX)
7451{
7452 IEMOP_MNEMONIC("xchg rSX,rAX");
7453 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSP);
7454}
7455
7456
7457/** Opcode 0x95. */
7458FNIEMOP_DEF(iemOp_xchg_eBP_eAX)
7459{
7460 IEMOP_MNEMONIC("xchg rBP,rAX");
7461 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBP);
7462}
7463
7464
7465/** Opcode 0x96. */
7466FNIEMOP_DEF(iemOp_xchg_eSI_eAX)
7467{
7468 IEMOP_MNEMONIC("xchg rSI,rAX");
7469 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSI);
7470}
7471
7472
7473/** Opcode 0x97. */
7474FNIEMOP_DEF(iemOp_xchg_eDI_eAX)
7475{
7476 IEMOP_MNEMONIC("xchg rDI,rAX");
7477 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDI);
7478}
7479
7480
7481/** Opcode 0x98. */
7482FNIEMOP_STUB(iemOp_cbw);
7483
7484
7485/** Opcode 0x99. */
7486FNIEMOP_DEF(iemOp_cwd)
7487{
7488 IEMOP_HLP_NO_LOCK_PREFIX();
7489 switch (pIemCpu->enmEffOpSize)
7490 {
7491 case IEMMODE_16BIT:
7492 IEMOP_MNEMONIC("cwd");
7493 IEM_MC_BEGIN(0, 1);
7494 IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 15) {
7495 IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xDX, UINT16_C(0xffff));
7496 } IEM_MC_ELSE() {
7497 IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xDX, 0);
7498 } IEM_MC_ENDIF();
7499 IEM_MC_ADVANCE_RIP();
7500 IEM_MC_END();
7501 return VINF_SUCCESS;
7502
7503 case IEMMODE_32BIT:
7504 IEMOP_MNEMONIC("cwq");
7505 IEM_MC_BEGIN(0, 1);
7506 IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 31) {
7507 IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xDX, UINT32_C(0xffffffff));
7508 } IEM_MC_ELSE() {
7509 IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xDX, 0);
7510 } IEM_MC_ENDIF();
7511 IEM_MC_ADVANCE_RIP();
7512 IEM_MC_END();
7513 return VINF_SUCCESS;
7514
7515 case IEMMODE_64BIT:
7516 IEMOP_MNEMONIC("cqo");
7517 IEM_MC_BEGIN(0, 1);
7518 IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 63) {
7519 IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xDX, UINT64_C(0xffffffffffffffff));
7520 } IEM_MC_ELSE() {
7521 IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xDX, 0);
7522 } IEM_MC_ENDIF();
7523 IEM_MC_ADVANCE_RIP();
7524 IEM_MC_END();
7525 return VINF_SUCCESS;
7526
7527 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7528 }
7529}
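
/* Worked examples (illustrative, not from the original source):
 cwd: AX=0x8000 -> DX=0xFFFF; AX=0x7FFF -> DX=0x0000
 cdq: EAX=0x80000000 -> EDX=0xFFFFFFFF
 cqo: RAX bit 63 set -> RDX=0xFFFFFFFFFFFFFFFF
 I.e. rDX is filled with copies of rAX's sign bit, which is what the
 IEM_MC_IF_GREG_BIT_SET branches above implement. */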
7530
7531
7532/** Opcode 0x9a. */
7533FNIEMOP_STUB(iemOp_call_Ap);
7534
7535
7536/** Opcode 0x9b. (aka fwait) */
7537FNIEMOP_DEF(iemOp_wait)
7538{
7539 IEMOP_MNEMONIC("wait");
7540 IEMOP_HLP_NO_LOCK_PREFIX();
7541
7542 IEM_MC_BEGIN(0, 0);
7543 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
7544 IEM_MC_MAYBE_RAISE_FPU_XCPT();
7545 IEM_MC_ADVANCE_RIP();
7546 IEM_MC_END();
7547 return VINF_SUCCESS;
7548}
7549
7550
7551/** Opcode 0x9c. */
7552FNIEMOP_DEF(iemOp_pushf_Fv)
7553{
7554 IEMOP_HLP_NO_LOCK_PREFIX();
7555 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
7556 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_pushf, pIemCpu->enmEffOpSize);
7557}
7558
7559
7560/** Opcode 0x9d. */
7561FNIEMOP_DEF(iemOp_popf_Fv)
7562{
7563 IEMOP_HLP_NO_LOCK_PREFIX();
7564 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
7565 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_popf, pIemCpu->enmEffOpSize);
7566}
7567
7568
7569/** Opcode 0x9e. */
7570FNIEMOP_STUB(iemOp_sahf);
7571/** Opcode 0x9f. */
7572FNIEMOP_STUB(iemOp_lahf);
7573
7574/**
7575 * Macro used by iemOp_mov_Al_Ob, iemOp_mov_rAX_Ov, iemOp_mov_Ob_AL and
7576 * iemOp_mov_Ov_rAX to fetch the moffsXX bytes of the opcode and fend off lock
7577 * prefixes. Will return on failures.
7578 * @param a_GCPtrMemOff The variable to store the offset in.
7579 */
7580#define IEMOP_FETCH_MOFFS_XX(a_GCPtrMemOff) \
7581 do \
7582 { \
7583 switch (pIemCpu->enmEffAddrMode) \
7584 { \
7585 case IEMMODE_16BIT: \
7586 { \
7587 uint16_t u16Off; IEM_OPCODE_GET_NEXT_U16(&u16Off); \
7588 (a_GCPtrMemOff) = u16Off; \
7589 break; \
7590 } \
7591 case IEMMODE_32BIT: \
7592 { \
7593 uint32_t u32Off; IEM_OPCODE_GET_NEXT_U32(&u32Off); \
7594 (a_GCPtrMemOff) = u32Off; \
7595 break; \
7596 } \
7597 case IEMMODE_64BIT: \
7598 IEM_OPCODE_GET_NEXT_U64(&(a_GCPtrMemOff)); \
7599 break; \
7600 IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
7601 } \
7602 IEMOP_HLP_NO_LOCK_PREFIX(); \
7603 } while (0)
7604
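/* Usage sketch (illustrative, not from the original source): for
 "mov al,[0x1234]" with a 16-bit effective address size the macro consumes the
 two moffs bytes 34 12 and yields a_GCPtrMemOff=0x1234; with 32-bit and 64-bit
 address sizes it consumes 4 and 8 immediate bytes respectively. */
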
7605/** Opcode 0xa0. */
7606FNIEMOP_DEF(iemOp_mov_Al_Ob)
7607{
7608 /*
7609 * Get the offset and fend off lock prefixes.
7610 */
7611 RTGCPTR GCPtrMemOff;
7612 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
7613
7614 /*
7615 * Fetch AL.
7616 */
7617 IEM_MC_BEGIN(0,1);
7618 IEM_MC_LOCAL(uint8_t, u8Tmp);
7619 IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
7620 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
7621 IEM_MC_ADVANCE_RIP();
7622 IEM_MC_END();
7623 return VINF_SUCCESS;
7624}
7625
7626
7627/** Opcode 0xa1. */
7628FNIEMOP_DEF(iemOp_mov_rAX_Ov)
7629{
7630 /*
7631 * Get the offset and fend off lock prefixes.
7632 */
7633 IEMOP_MNEMONIC("mov rAX,Ov");
7634 RTGCPTR GCPtrMemOff;
7635 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
7636
7637 /*
7638 * Fetch rAX.
7639 */
7640 switch (pIemCpu->enmEffOpSize)
7641 {
7642 case IEMMODE_16BIT:
7643 IEM_MC_BEGIN(0,1);
7644 IEM_MC_LOCAL(uint16_t, u16Tmp);
7645 IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
7646 IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
7647 IEM_MC_ADVANCE_RIP();
7648 IEM_MC_END();
7649 return VINF_SUCCESS;
7650
7651 case IEMMODE_32BIT:
7652 IEM_MC_BEGIN(0,1);
7653 IEM_MC_LOCAL(uint32_t, u32Tmp);
7654 IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
7655 IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp);
7656 IEM_MC_ADVANCE_RIP();
7657 IEM_MC_END();
7658 return VINF_SUCCESS;
7659
7660 case IEMMODE_64BIT:
7661 IEM_MC_BEGIN(0,1);
7662 IEM_MC_LOCAL(uint64_t, u64Tmp);
7663 IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
7664 IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp);
7665 IEM_MC_ADVANCE_RIP();
7666 IEM_MC_END();
7667 return VINF_SUCCESS;
7668
7669 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7670 }
7671}
7672
7673
7674/** Opcode 0xa2. */
7675FNIEMOP_DEF(iemOp_mov_Ob_AL)
7676{
7677 /*
7678 * Get the offset and fend off lock prefixes.
7679 */
7680 RTGCPTR GCPtrMemOff;
7681 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
7682
7683 /*
7684 * Store AL.
7685 */
7686 IEM_MC_BEGIN(0,1);
7687 IEM_MC_LOCAL(uint8_t, u8Tmp);
7688 IEM_MC_FETCH_GREG_U8(u8Tmp, X86_GREG_xAX);
7689 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrMemOff, u8Tmp);
7690 IEM_MC_ADVANCE_RIP();
7691 IEM_MC_END();
7692 return VINF_SUCCESS;
7693}
7694
7695
7696/** Opcode 0xa3. */
7697FNIEMOP_DEF(iemOp_mov_Ov_rAX)
7698{
7699 /*
7700 * Get the offset and fend off lock prefixes.
7701 */
7702 RTGCPTR GCPtrMemOff;
7703 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
7704
7705 /*
7706 * Store rAX.
7707 */
7708 switch (pIemCpu->enmEffOpSize)
7709 {
7710 case IEMMODE_16BIT:
7711 IEM_MC_BEGIN(0,1);
7712 IEM_MC_LOCAL(uint16_t, u16Tmp);
7713 IEM_MC_FETCH_GREG_U16(u16Tmp, X86_GREG_xAX);
7714 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrMemOff, u16Tmp);
7715 IEM_MC_ADVANCE_RIP();
7716 IEM_MC_END();
7717 return VINF_SUCCESS;
7718
7719 case IEMMODE_32BIT:
7720 IEM_MC_BEGIN(0,1);
7721 IEM_MC_LOCAL(uint32_t, u32Tmp);
7722 IEM_MC_FETCH_GREG_U32(u32Tmp, X86_GREG_xAX);
7723 IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrMemOff, u32Tmp);
7724 IEM_MC_ADVANCE_RIP();
7725 IEM_MC_END();
7726 return VINF_SUCCESS;
7727
7728 case IEMMODE_64BIT:
7729 IEM_MC_BEGIN(0,1);
7730 IEM_MC_LOCAL(uint64_t, u64Tmp);
7731 IEM_MC_FETCH_GREG_U64(u64Tmp, X86_GREG_xAX);
7732 IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrMemOff, u64Tmp);
7733 IEM_MC_ADVANCE_RIP();
7734 IEM_MC_END();
7735 return VINF_SUCCESS;
7736
7737 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7738 }
7739}
7740
7741/** Macro used by iemOp_movsb_Xb_Yb and iemOp_movswd_Xv_Yv */
7742#define IEM_MOVS_CASE(ValBits, AddrBits) \
7743 IEM_MC_BEGIN(0, 2); \
7744 IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
7745 IEM_MC_LOCAL(RTGCPTR, uAddr); \
7746 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
7747 IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
7748 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
7749 IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
7750 IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
7751 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
7752 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
7753 } IEM_MC_ELSE() { \
7754 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
7755 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
7756 } IEM_MC_ENDIF(); \
7757 IEM_MC_ADVANCE_RIP(); \
7758 IEM_MC_END();
7759
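/* Expansion sketch (illustrative, not from the original source):
 IEM_MOVS_CASE(8, 16) fetches a byte from iEffSeg:[SI] (DS unless overridden),
 stores it at ES:[DI], then increments or decrements both SI and DI by 1
 according to EFLAGS.DF - the non-rep core of movsb. */
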
7760/** Opcode 0xa4. */
7761FNIEMOP_DEF(iemOp_movsb_Xb_Yb)
7762{
7763 IEMOP_HLP_NO_LOCK_PREFIX();
7764
7765 /*
7766 * Use the C implementation if a repeat prefix is encountered.
7767 */
7768 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
7769 {
7770 IEMOP_MNEMONIC("rep movsb Xb,Yb");
7771 switch (pIemCpu->enmEffAddrMode)
7772 {
7773 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr16, pIemCpu->iEffSeg);
7774 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr32, pIemCpu->iEffSeg);
7775 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr64, pIemCpu->iEffSeg);
7776 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7777 }
7778 }
7779 IEMOP_MNEMONIC("movsb Xb,Yb");
7780
7781 /*
7782 * Sharing case implementation with movs[wdq] below.
7783 */
7784 switch (pIemCpu->enmEffAddrMode)
7785 {
7786 case IEMMODE_16BIT: IEM_MOVS_CASE(8, 16); break;
7787 case IEMMODE_32BIT: IEM_MOVS_CASE(8, 32); break;
7788 case IEMMODE_64BIT: IEM_MOVS_CASE(8, 64); break;
7789 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7790 }
7791 return VINF_SUCCESS;
7792}
7793
7794
7795/** Opcode 0xa5. */
7796FNIEMOP_DEF(iemOp_movswd_Xv_Yv)
7797{
7798 IEMOP_HLP_NO_LOCK_PREFIX();
7799
7800 /*
7801 * Use the C implementation if a repeat prefix is encountered.
7802 */
7803 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
7804 {
7805 IEMOP_MNEMONIC("rep movs Xv,Yv");
7806 switch (pIemCpu->enmEffOpSize)
7807 {
7808 case IEMMODE_16BIT:
7809 switch (pIemCpu->enmEffAddrMode)
7810 {
7811 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr16, pIemCpu->iEffSeg);
7812 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr32, pIemCpu->iEffSeg);
7813 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr64, pIemCpu->iEffSeg);
7814 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7815 }
7816 break;
7817 case IEMMODE_32BIT:
7818 switch (pIemCpu->enmEffAddrMode)
7819 {
7820 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr16, pIemCpu->iEffSeg);
7821 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr32, pIemCpu->iEffSeg);
7822 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr64, pIemCpu->iEffSeg);
7823 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7824 }
7825 case IEMMODE_64BIT:
7826 switch (pIemCpu->enmEffAddrMode)
7827 {
7828 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
7829 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr32, pIemCpu->iEffSeg);
7830 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr64, pIemCpu->iEffSeg);
7831 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7832 }
7833 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7834 }
7835 }
7836 IEMOP_MNEMONIC("movs Xv,Yv");
7837
7838 /*
7839 * Annoying double switch here.
7840 * Using ugly macro for implementing the cases, sharing it with movsb.
7841 */
7842 switch (pIemCpu->enmEffOpSize)
7843 {
7844 case IEMMODE_16BIT:
7845 switch (pIemCpu->enmEffAddrMode)
7846 {
7847 case IEMMODE_16BIT: IEM_MOVS_CASE(16, 16); break;
7848 case IEMMODE_32BIT: IEM_MOVS_CASE(16, 32); break;
7849 case IEMMODE_64BIT: IEM_MOVS_CASE(16, 64); break;
7850 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7851 }
7852 break;
7853
7854 case IEMMODE_32BIT:
7855 switch (pIemCpu->enmEffAddrMode)
7856 {
7857 case IEMMODE_16BIT: IEM_MOVS_CASE(32, 16); break;
7858 case IEMMODE_32BIT: IEM_MOVS_CASE(32, 32); break;
7859 case IEMMODE_64BIT: IEM_MOVS_CASE(32, 64); break;
7860 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7861 }
7862 break;
7863
7864 case IEMMODE_64BIT:
7865 switch (pIemCpu->enmEffAddrMode)
7866 {
7867 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
7868 case IEMMODE_32BIT: IEM_MOVS_CASE(64, 32); break;
7869 case IEMMODE_64BIT: IEM_MOVS_CASE(64, 64); break;
7870 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7871 }
7872 break;
7873 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7874 }
7875 return VINF_SUCCESS;
7876}
7877
7878#undef IEM_MOVS_CASE
7879
7880/** Macro used by iemOp_cmpsb_Xb_Yb and iemOp_cmpswd_Xv_Yv */
7881#define IEM_CMPS_CASE(ValBits, AddrBits) \
7882 IEM_MC_BEGIN(3, 3); \
7883 IEM_MC_ARG(uint##ValBits##_t *, puValue1, 0); \
7884 IEM_MC_ARG(uint##ValBits##_t, uValue2, 1); \
7885 IEM_MC_ARG(uint32_t *, pEFlags, 2); \
7886 IEM_MC_LOCAL(uint##ValBits##_t, uValue1); \
7887 IEM_MC_LOCAL(RTGCPTR, uAddr); \
7888 \
7889 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
7890 IEM_MC_FETCH_MEM_U##ValBits(uValue1, pIemCpu->iEffSeg, uAddr); \
7891 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
7892 IEM_MC_FETCH_MEM_U##ValBits(uValue2, X86_SREG_ES, uAddr); \
7893 IEM_MC_REF_LOCAL(puValue1, uValue1); \
7894 IEM_MC_REF_EFLAGS(pEFlags); \
7895 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puValue1, uValue2, pEFlags); \
7896 \
7897 IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
7898 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
7899 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
7900 } IEM_MC_ELSE() { \
7901 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
7902 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
7903 } IEM_MC_ENDIF(); \
7904 IEM_MC_ADVANCE_RIP(); \
7905 IEM_MC_END();
7906
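/* Semantics note (illustrative, not from the original source): IEM_CMPS_CASE
 computes iEffSeg:[xSI] minus ES:[xDI], updating EFLAGS only (the result is
 discarded), then steps both index registers by ValBits/8 per EFLAGS.DF. */
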
7907/** Opcode 0xa6. */
7908FNIEMOP_DEF(iemOp_cmpsb_Xb_Yb)
7909{
7910 IEMOP_HLP_NO_LOCK_PREFIX();
7911
7912 /*
7913 * Use the C implementation if a repeat prefix is encountered.
7914 */
7915 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
7916 {
7917 IEMOP_MNEMONIC("repe cmps Xb,Yb");
7918 switch (pIemCpu->enmEffAddrMode)
7919 {
7920 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr16, pIemCpu->iEffSeg);
7921 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr32, pIemCpu->iEffSeg);
7922 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr64, pIemCpu->iEffSeg);
7923 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7924 }
7925 }
7926 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
7927 {
7928 IEMOP_MNEMONIC("repe cmps Xb,Yb");
7929 switch (pIemCpu->enmEffAddrMode)
7930 {
7931 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr16, pIemCpu->iEffSeg);
7932 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr32, pIemCpu->iEffSeg);
7933 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr64, pIemCpu->iEffSeg);
7934 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7935 }
7936 }
7937 IEMOP_MNEMONIC("cmps Xb,Yb");
7938
7939 /*
7940 * Sharing case implementation with cmps[wdq] below.
7941 */
7942 switch (pIemCpu->enmEffAddrMode)
7943 {
7944 case IEMMODE_16BIT: IEM_CMPS_CASE(8, 16); break;
7945 case IEMMODE_32BIT: IEM_CMPS_CASE(8, 32); break;
7946 case IEMMODE_64BIT: IEM_CMPS_CASE(8, 64); break;
7947 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7948 }
7949 return VINF_SUCCESS;
7951}
7952
7953
7954/** Opcode 0xa7. */
7955FNIEMOP_DEF(iemOp_cmpswd_Xv_Yv)
7956{
7957 IEMOP_HLP_NO_LOCK_PREFIX();
7958
7959 /*
7960 * Use the C implementation if a repeat prefix is encountered.
7961 */
7962 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
7963 {
7964 IEMOP_MNEMONIC("repe cmps Xv,Yv");
7965 switch (pIemCpu->enmEffOpSize)
7966 {
7967 case IEMMODE_16BIT:
7968 switch (pIemCpu->enmEffAddrMode)
7969 {
7970 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr16, pIemCpu->iEffSeg);
7971 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr32, pIemCpu->iEffSeg);
7972 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr64, pIemCpu->iEffSeg);
7973 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7974 }
7975 break;
7976 case IEMMODE_32BIT:
7977 switch (pIemCpu->enmEffAddrMode)
7978 {
7979 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr16, pIemCpu->iEffSeg);
7980 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr32, pIemCpu->iEffSeg);
7981 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr64, pIemCpu->iEffSeg);
7982 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7983 }
7984 case IEMMODE_64BIT:
7985 switch (pIemCpu->enmEffAddrMode)
7986 {
7987 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
7988 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr32, pIemCpu->iEffSeg);
7989 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr64, pIemCpu->iEffSeg);
7990 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7991 }
7992 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7993 }
7994 }
7995
7996 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
7997 {
7998 IEMOP_MNEMONIC("repne cmps Xv,Yv");
7999 switch (pIemCpu->enmEffOpSize)
8000 {
8001 case IEMMODE_16BIT:
8002 switch (pIemCpu->enmEffAddrMode)
8003 {
8004 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr16, pIemCpu->iEffSeg);
8005 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr32, pIemCpu->iEffSeg);
8006 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr64, pIemCpu->iEffSeg);
8007 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8008 }
8009 break;
8010 case IEMMODE_32BIT:
8011 switch (pIemCpu->enmEffAddrMode)
8012 {
8013 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr16, pIemCpu->iEffSeg);
8014 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr32, pIemCpu->iEffSeg);
8015 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr64, pIemCpu->iEffSeg);
8016 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8017 }
8018 case IEMMODE_64BIT:
8019 switch (pIemCpu->enmEffAddrMode)
8020 {
8021 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
8022 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr32, pIemCpu->iEffSeg);
8023 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr64, pIemCpu->iEffSeg);
8024 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8025 }
8026 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8027 }
8028 }
8029
8030 IEMOP_MNEMONIC("cmps Xv,Yv");
8031
8032 /*
8033 * Annoying double switch here.
8034 * Using ugly macro for implementing the cases, sharing it with cmpsb.
8035 */
8036 switch (pIemCpu->enmEffOpSize)
8037 {
8038 case IEMMODE_16BIT:
8039 switch (pIemCpu->enmEffAddrMode)
8040 {
8041 case IEMMODE_16BIT: IEM_CMPS_CASE(16, 16); break;
8042 case IEMMODE_32BIT: IEM_CMPS_CASE(16, 32); break;
8043 case IEMMODE_64BIT: IEM_CMPS_CASE(16, 64); break;
8044 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8045 }
8046 break;
8047
8048 case IEMMODE_32BIT:
8049 switch (pIemCpu->enmEffAddrMode)
8050 {
8051 case IEMMODE_16BIT: IEM_CMPS_CASE(32, 16); break;
8052 case IEMMODE_32BIT: IEM_CMPS_CASE(32, 32); break;
8053 case IEMMODE_64BIT: IEM_CMPS_CASE(32, 64); break;
8054 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8055 }
8056 break;
8057
8058 case IEMMODE_64BIT:
8059 switch (pIemCpu->enmEffAddrMode)
8060 {
8061 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
8062 case IEMMODE_32BIT: IEM_CMPS_CASE(64, 32); break;
8063 case IEMMODE_64BIT: IEM_CMPS_CASE(64, 64); break;
8064 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8065 }
8066 break;
8067 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8068 }
8069 return VINF_SUCCESS;
8071}
8072
8073#undef IEM_CMPS_CASE
8074
8075/** Opcode 0xa8. */
8076FNIEMOP_DEF(iemOp_test_AL_Ib)
8077{
8078 IEMOP_MNEMONIC("test al,Ib");
8079 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
8080 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_test);
8081}
8082
8083
8084/** Opcode 0xa9. */
8085FNIEMOP_DEF(iemOp_test_eAX_Iz)
8086{
8087 IEMOP_MNEMONIC("test rAX,Iz");
8088 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
8089 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_test);
8090}
8091
8092
8093/** Macro used by iemOp_stosb_Yb_AL and iemOp_stoswd_Yv_eAX */
8094#define IEM_STOS_CASE(ValBits, AddrBits) \
8095 IEM_MC_BEGIN(0, 2); \
8096 IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
8097 IEM_MC_LOCAL(RTGCPTR, uAddr); \
8098 IEM_MC_FETCH_GREG_U##ValBits(uValue, X86_GREG_xAX); \
8099 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
8100 IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
8101 IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
8102 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
8103 } IEM_MC_ELSE() { \
8104 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
8105 } IEM_MC_ENDIF(); \
8106 IEM_MC_ADVANCE_RIP(); \
8107 IEM_MC_END();
8108
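/* Semantics note (illustrative, not from the original source): IEM_STOS_CASE
 stores the low ValBits of rAX at ES:[xDI] and steps xDI by ValBits/8 per
 EFLAGS.DF; no flags are modified. */
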
8109/** Opcode 0xaa. */
8110FNIEMOP_DEF(iemOp_stosb_Yb_AL)
8111{
8112 IEMOP_HLP_NO_LOCK_PREFIX();
8113
8114 /*
8115 * Use the C implementation if a repeat prefix is encountered.
8116 */
8117 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
8118 {
8119 IEMOP_MNEMONIC("rep stos Yb,al");
8120 switch (pIemCpu->enmEffAddrMode)
8121 {
8122 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m16);
8123 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m32);
8124 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m64);
8125 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8126 }
8127 }
8128 IEMOP_MNEMONIC("stos Yb,al");
8129
8130 /*
8131 * Sharing case implementation with stos[wdq] below.
8132 */
8133 switch (pIemCpu->enmEffAddrMode)
8134 {
8135 case IEMMODE_16BIT: IEM_STOS_CASE(8, 16); break;
8136 case IEMMODE_32BIT: IEM_STOS_CASE(8, 32); break;
8137 case IEMMODE_64BIT: IEM_STOS_CASE(8, 64); break;
8138 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8139 }
8140 return VINF_SUCCESS;
8141}
8142
8143
8144/** Opcode 0xab. */
8145FNIEMOP_DEF(iemOp_stoswd_Yv_eAX)
8146{
8147 IEMOP_HLP_NO_LOCK_PREFIX();
8148
8149 /*
8150 * Use the C implementation if a repeat prefix is encountered.
8151 */
8152 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
8153 {
8154 IEMOP_MNEMONIC("rep stos Yv,rAX");
8155 switch (pIemCpu->enmEffOpSize)
8156 {
8157 case IEMMODE_16BIT:
8158 switch (pIemCpu->enmEffAddrMode)
8159 {
8160 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m16);
8161 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m32);
8162 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m64);
8163 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8164 }
8165 break;
8166 case IEMMODE_32BIT:
8167 switch (pIemCpu->enmEffAddrMode)
8168 {
8169 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m16);
8170 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m32);
8171 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m64);
8172 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8173 }
8174 case IEMMODE_64BIT:
8175 switch (pIemCpu->enmEffAddrMode)
8176 {
8177 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
8178 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m32);
8179 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m64);
8180 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8181 }
8182 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8183 }
8184 }
8185 IEMOP_MNEMONIC("stos Yv,rAX");
8186
8187 /*
8188 * Annoying double switch here.
8189 * Using ugly macro for implementing the cases, sharing it with stosb.
8190 */
8191 switch (pIemCpu->enmEffOpSize)
8192 {
8193 case IEMMODE_16BIT:
8194 switch (pIemCpu->enmEffAddrMode)
8195 {
8196 case IEMMODE_16BIT: IEM_STOS_CASE(16, 16); break;
8197 case IEMMODE_32BIT: IEM_STOS_CASE(16, 32); break;
8198 case IEMMODE_64BIT: IEM_STOS_CASE(16, 64); break;
8199 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8200 }
8201 break;
8202
8203 case IEMMODE_32BIT:
8204 switch (pIemCpu->enmEffAddrMode)
8205 {
8206 case IEMMODE_16BIT: IEM_STOS_CASE(32, 16); break;
8207 case IEMMODE_32BIT: IEM_STOS_CASE(32, 32); break;
8208 case IEMMODE_64BIT: IEM_STOS_CASE(32, 64); break;
8209 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8210 }
8211 break;
8212
8213 case IEMMODE_64BIT:
8214 switch (pIemCpu->enmEffAddrMode)
8215 {
8216 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
8217 case IEMMODE_32BIT: IEM_STOS_CASE(64, 32); break;
8218 case IEMMODE_64BIT: IEM_STOS_CASE(64, 64); break;
8219 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8220 }
8221 break;
8222 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8223 }
8224 return VINF_SUCCESS;
8225}
8226
8227#undef IEM_STOS_CASE
8228
8229/** Macro used by iemOp_lodsb_AL_Xb and iemOp_lodswd_eAX_Xv */
8230#define IEM_LODS_CASE(ValBits, AddrBits) \
8231 IEM_MC_BEGIN(0, 2); \
8232 IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
8233 IEM_MC_LOCAL(RTGCPTR, uAddr); \
8234 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
8235 IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
8236 IEM_MC_STORE_GREG_U##ValBits(X86_GREG_xAX, uValue); \
8237 IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
8238 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
8239 } IEM_MC_ELSE() { \
8240 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
8241 } IEM_MC_ENDIF(); \
8242 IEM_MC_ADVANCE_RIP(); \
8243 IEM_MC_END();
8244
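/* Semantics note (illustrative, not from the original source): IEM_LODS_CASE
 loads the low ValBits of rAX from iEffSeg:[xSI] and steps xSI by ValBits/8
 per EFLAGS.DF; no flags are modified. */
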
8245/** Opcode 0xac. */
8246FNIEMOP_DEF(iemOp_lodsb_AL_Xb)
8247{
8248 IEMOP_HLP_NO_LOCK_PREFIX();
8249
8250 /*
8251 * Use the C implementation if a repeat prefix is encountered.
8252 */
8253 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
8254 {
8255 IEMOP_MNEMONIC("rep lodsb al,Xb");
8256 switch (pIemCpu->enmEffAddrMode)
8257 {
8258 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m16, pIemCpu->iEffSeg);
8259 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m32, pIemCpu->iEffSeg);
8260 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m64, pIemCpu->iEffSeg);
8261 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8262 }
8263 }
8264 IEMOP_MNEMONIC("lodsb al,Xb");
8265
8266 /*
8267 * Sharing case implementation with lods[wdq] below.
8268 */
8269 switch (pIemCpu->enmEffAddrMode)
8270 {
8271 case IEMMODE_16BIT: IEM_LODS_CASE(8, 16); break;
8272 case IEMMODE_32BIT: IEM_LODS_CASE(8, 32); break;
8273 case IEMMODE_64BIT: IEM_LODS_CASE(8, 64); break;
8274 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8275 }
8276 return VINF_SUCCESS;
8277}
8278
8279
8280/** Opcode 0xad. */
8281FNIEMOP_DEF(iemOp_lodswd_eAX_Xv)
8282{
8283 IEMOP_HLP_NO_LOCK_PREFIX();
8284
8285 /*
8286 * Use the C implementation if a repeat prefix is encountered.
8287 */
8288 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
8289 {
8290 IEMOP_MNEMONIC("rep lods rAX,Xv");
8291 switch (pIemCpu->enmEffOpSize)
8292 {
8293 case IEMMODE_16BIT:
8294 switch (pIemCpu->enmEffAddrMode)
8295 {
8296 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m16, pIemCpu->iEffSeg);
8297 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m32, pIemCpu->iEffSeg);
8298 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m64, pIemCpu->iEffSeg);
8299 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8300 }
8301 break;
8302 case IEMMODE_32BIT:
8303 switch (pIemCpu->enmEffAddrMode)
8304 {
8305 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m16, pIemCpu->iEffSeg);
8306 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m32, pIemCpu->iEffSeg);
8307 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m64, pIemCpu->iEffSeg);
8308 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8309 }
8310 case IEMMODE_64BIT:
8311 switch (pIemCpu->enmEffAddrMode)
8312 {
8313 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
8314 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m32, pIemCpu->iEffSeg);
8315 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m64, pIemCpu->iEffSeg);
8316 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8317 }
8318 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8319 }
8320 }
8321 IEMOP_MNEMONIC("lods rAX,Xv");
8322
8323 /*
8324 * Annoying double switch here.
8325 * Using ugly macro for implementing the cases, sharing it with lodsb.
8326 */
8327 switch (pIemCpu->enmEffOpSize)
8328 {
8329 case IEMMODE_16BIT:
8330 switch (pIemCpu->enmEffAddrMode)
8331 {
8332 case IEMMODE_16BIT: IEM_LODS_CASE(16, 16); break;
8333 case IEMMODE_32BIT: IEM_LODS_CASE(16, 32); break;
8334 case IEMMODE_64BIT: IEM_LODS_CASE(16, 64); break;
8335 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8336 }
8337 break;
8338
8339 case IEMMODE_32BIT:
8340 switch (pIemCpu->enmEffAddrMode)
8341 {
8342 case IEMMODE_16BIT: IEM_LODS_CASE(32, 16); break;
8343 case IEMMODE_32BIT: IEM_LODS_CASE(32, 32); break;
8344 case IEMMODE_64BIT: IEM_LODS_CASE(32, 64); break;
8345 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8346 }
8347 break;
8348
8349 case IEMMODE_64BIT:
8350 switch (pIemCpu->enmEffAddrMode)
8351 {
8352 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
8353 case IEMMODE_32BIT: IEM_LODS_CASE(64, 32); break;
8354 case IEMMODE_64BIT: IEM_LODS_CASE(64, 64); break;
8355 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8356 }
8357 break;
8358 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8359 }
8360 return VINF_SUCCESS;
8361}
8362
8363#undef IEM_LODS_CASE
8364
8365/** Macro used by iemOp_scasb_AL_Xb and iemOp_scaswd_eAX_Xv */
8366#define IEM_SCAS_CASE(ValBits, AddrBits) \
8367 IEM_MC_BEGIN(3, 2); \
8368 IEM_MC_ARG(uint##ValBits##_t *, puRax, 0); \
8369 IEM_MC_ARG(uint##ValBits##_t, uValue, 1); \
8370 IEM_MC_ARG(uint32_t *, pEFlags, 2); \
8371 IEM_MC_LOCAL(RTGCPTR, uAddr); \
8372 \
8373 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
8374 IEM_MC_FETCH_MEM_U##ValBits(uValue, X86_SREG_ES, uAddr); \
8375 IEM_MC_REF_GREG_U##ValBits(puRax, X86_GREG_xAX); \
8376 IEM_MC_REF_EFLAGS(pEFlags); \
8377 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puRax, uValue, pEFlags); \
8378 \
8379 IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
8380 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
8381 } IEM_MC_ELSE() { \
8382 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
8383 } IEM_MC_ENDIF(); \
8384 IEM_MC_ADVANCE_RIP(); \
8385 IEM_MC_END();
8386
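/* Semantics note (illustrative, not from the original source): IEM_SCAS_CASE
 compares rAX against ES:[xDI] (rAX minus memory, flags only) and steps xDI;
 the REPE/REPNE paths in the handlers below instead defer to C implementations
 that loop on rCX and the ZF condition. */
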
8387/** Opcode 0xae. */
8388FNIEMOP_DEF(iemOp_scasb_AL_Xb)
8389{
8390 IEMOP_HLP_NO_LOCK_PREFIX();
8391
8392 /*
8393 * Use the C implementation if a repeat prefix is encountered.
8394 */
8395 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
8396 {
8397 IEMOP_MNEMONIC("repe scasb al,Xb");
8398 switch (pIemCpu->enmEffAddrMode)
8399 {
8400 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m16);
8401 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m32);
8402 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m64);
8403 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8404 }
8405 }
8406 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
8407 {
8408 IEMOP_MNEMONIC("repne scasb al,Xb");
8409 switch (pIemCpu->enmEffAddrMode)
8410 {
8411 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m16);
8412 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m32);
8413 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m64);
8414 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8415 }
8416 }
8417 IEMOP_MNEMONIC("scasb al,Xb");
8418
8419 /*
8420 * Sharing case implementation with scas[wdq] below.
8421 */
8422 switch (pIemCpu->enmEffAddrMode)
8423 {
8424 case IEMMODE_16BIT: IEM_SCAS_CASE(8, 16); break;
8425 case IEMMODE_32BIT: IEM_SCAS_CASE(8, 32); break;
8426 case IEMMODE_64BIT: IEM_SCAS_CASE(8, 64); break;
8427 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8428 }
8429 return VINF_SUCCESS;
8430}
8431
8432
8433/** Opcode 0xaf. */
8434FNIEMOP_DEF(iemOp_scaswd_eAX_Xv)
8435{
8436 IEMOP_HLP_NO_LOCK_PREFIX();
8437
8438 /*
8439 * Use the C implementation if a repeat prefix is encountered.
8440 */
8441 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
8442 {
8443 IEMOP_MNEMONIC("repe scas rAX,Xv");
8444 switch (pIemCpu->enmEffOpSize)
8445 {
8446 case IEMMODE_16BIT:
8447 switch (pIemCpu->enmEffAddrMode)
8448 {
8449 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m16);
8450 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m32);
8451 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m64);
8452 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8453 }
8454 break;
8455 case IEMMODE_32BIT:
8456 switch (pIemCpu->enmEffAddrMode)
8457 {
8458 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m16);
8459 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m32);
8460 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m64);
8461 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8462 }
8463 case IEMMODE_64BIT:
8464 switch (pIemCpu->enmEffAddrMode)
8465 {
8466 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /** @todo Is this right? 16-bit addressing isn't possible in 64-bit mode, but 32-bit is. */
8467 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m32);
8468 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m64);
8469 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8470 }
8471 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8472 }
8473 }
8474 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
8475 {
8476 IEMOP_MNEMONIC("repne scas rAX,Xv");
8477 switch (pIemCpu->enmEffOpSize)
8478 {
8479 case IEMMODE_16BIT:
8480 switch (pIemCpu->enmEffAddrMode)
8481 {
8482 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m16);
8483 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m32);
8484 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m64);
8485 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8486 }
8487 break;
8488 case IEMMODE_32BIT:
8489 switch (pIemCpu->enmEffAddrMode)
8490 {
8491 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m16);
8492 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m32);
8493 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m64);
8494 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8495 }
8496 case IEMMODE_64BIT:
8497 switch (pIemCpu->enmEffAddrMode)
8498 {
8499 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
8500 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_rax_m32);
8501 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_rax_m64);
8502 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8503 }
8504 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8505 }
8506 }
8507 IEMOP_MNEMONIC("scas rAX,Xv");
8508
8509 /*
8510 * Annoying double switch here.
8511 * Using ugly macro for implementing the cases, sharing it with scasb.
8512 */
8513 switch (pIemCpu->enmEffOpSize)
8514 {
8515 case IEMMODE_16BIT:
8516 switch (pIemCpu->enmEffAddrMode)
8517 {
8518 case IEMMODE_16BIT: IEM_SCAS_CASE(16, 16); break;
8519 case IEMMODE_32BIT: IEM_SCAS_CASE(16, 32); break;
8520 case IEMMODE_64BIT: IEM_SCAS_CASE(16, 64); break;
8521 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8522 }
8523 break;
8524
8525 case IEMMODE_32BIT:
8526 switch (pIemCpu->enmEffAddrMode)
8527 {
8528 case IEMMODE_16BIT: IEM_SCAS_CASE(32, 16); break;
8529 case IEMMODE_32BIT: IEM_SCAS_CASE(32, 32); break;
8530 case IEMMODE_64BIT: IEM_SCAS_CASE(32, 64); break;
8531 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8532 }
8533 break;
8534
8535 case IEMMODE_64BIT:
8536 switch (pIemCpu->enmEffAddrMode)
8537 {
8538 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
8539 case IEMMODE_32BIT: IEM_SCAS_CASE(64, 32); break;
8540 case IEMMODE_64BIT: IEM_SCAS_CASE(64, 64); break;
8541 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8542 }
8543 break;
8544 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8545 }
8546 return VINF_SUCCESS;
8547}
8548
8549#undef IEM_SCAS_CASE
8550
8551/**
8552 * Common 'mov r8, imm8' helper.
8553 */
8554FNIEMOP_DEF_1(iemOpCommonMov_r8_Ib, uint8_t, iReg)
8555{
8556 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
8557 IEMOP_HLP_NO_LOCK_PREFIX();
8558
8559 IEM_MC_BEGIN(0, 1);
8560 IEM_MC_LOCAL_CONST(uint8_t, u8Value,/*=*/ u8Imm);
8561 IEM_MC_STORE_GREG_U8(iReg, u8Value);
8562 IEM_MC_ADVANCE_RIP();
8563 IEM_MC_END();
8564
8565 return VINF_SUCCESS;
8566}
8567
8568
8569/** Opcode 0xb0. */
8570FNIEMOP_DEF(iemOp_mov_AL_Ib)
8571{
8572 IEMOP_MNEMONIC("mov AL,Ib");
8573 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xAX);
8574}
8575
8576
8577/** Opcode 0xb1. */
8578FNIEMOP_DEF(iemOp_CL_Ib)
8579{
8580 IEMOP_MNEMONIC("mov CL,Ib");
8581 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xCX);
8582}
8583
8584
8585/** Opcode 0xb2. */
8586FNIEMOP_DEF(iemOp_DL_Ib)
8587{
8588 IEMOP_MNEMONIC("mov DL,Ib");
8589 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDX);
8590}
8591
8592
8593/** Opcode 0xb3. */
8594FNIEMOP_DEF(iemOp_BL_Ib)
8595{
8596 IEMOP_MNEMONIC("mov BL,Ib");
8597 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBX);
8598}
8599
8600
8601/** Opcode 0xb4. */
8602FNIEMOP_DEF(iemOp_mov_AH_Ib)
8603{
8604 IEMOP_MNEMONIC("mov AH,Ib");
8605 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSP);
8606}
8607
8608
8609/** Opcode 0xb5. */
8610FNIEMOP_DEF(iemOp_CH_Ib)
8611{
8612 IEMOP_MNEMONIC("mov CH,Ib");
8613 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBP);
8614}
8615
8616
8617/** Opcode 0xb6. */
8618FNIEMOP_DEF(iemOp_DH_Ib)
8619{
8620 IEMOP_MNEMONIC("mov DH,Ib");
8621 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSI);
8622}
8623
8624
8625/** Opcode 0xb7. */
8626FNIEMOP_DEF(iemOp_BH_Ib)
8627{
8628 IEMOP_MNEMONIC("mov BH,Ib");
8629 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDI);
8630}
8631
8632
8633/**
8634 * Common 'mov regX,immX' helper.
8635 */
8636FNIEMOP_DEF_1(iemOpCommonMov_Rv_Iv, uint8_t, iReg)
8637{
8638 switch (pIemCpu->enmEffOpSize)
8639 {
8640 case IEMMODE_16BIT:
8641 {
8642 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
8643 IEMOP_HLP_NO_LOCK_PREFIX();
8644
8645 IEM_MC_BEGIN(0, 1);
8646 IEM_MC_LOCAL_CONST(uint16_t, u16Value,/*=*/ u16Imm);
8647 IEM_MC_STORE_GREG_U16(iReg, u16Value);
8648 IEM_MC_ADVANCE_RIP();
8649 IEM_MC_END();
8650 break;
8651 }
8652
8653 case IEMMODE_32BIT:
8654 {
8655 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
8656 IEMOP_HLP_NO_LOCK_PREFIX();
8657
8658 IEM_MC_BEGIN(0, 1);
8659 IEM_MC_LOCAL_CONST(uint32_t, u32Value,/*=*/ u32Imm);
8660 IEM_MC_STORE_GREG_U32(iReg, u32Value);
8661 IEM_MC_ADVANCE_RIP();
8662 IEM_MC_END();
8663 break;
8664 }
8665 case IEMMODE_64BIT:
8666 {
8667 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm);
8668 IEMOP_HLP_NO_LOCK_PREFIX();
8669
8670 IEM_MC_BEGIN(0, 1);
8671 IEM_MC_LOCAL_CONST(uint64_t, u64Value,/*=*/ u64Imm);
8672 IEM_MC_STORE_GREG_U64(iReg, u64Value);
8673 IEM_MC_ADVANCE_RIP();
8674 IEM_MC_END();
8675 break;
8676 }
8677 }
8678
8679 return VINF_SUCCESS;
8680}
8681
8682
8683/** Opcode 0xb8. */
8684FNIEMOP_DEF(iemOp_eAX_Iv)
8685{
8686 IEMOP_MNEMONIC("mov rAX,IV");
8687 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xAX);
8688}
8689
8690
8691/** Opcode 0xb9. */
8692FNIEMOP_DEF(iemOp_eCX_Iv)
8693{
8694 IEMOP_MNEMONIC("mov rCX,IV");
8695 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xCX);
8696}
8697
8698
8699/** Opcode 0xba. */
8700FNIEMOP_DEF(iemOp_eDX_Iv)
8701{
8702 IEMOP_MNEMONIC("mov rDX,IV");
8703 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDX);
8704}
8705
8706
8707/** Opcode 0xbb. */
8708FNIEMOP_DEF(iemOp_eBX_Iv)
8709{
8710 IEMOP_MNEMONIC("mov rBX,IV");
8711 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBX);
8712}
8713
8714
8715/** Opcode 0xbc. */
8716FNIEMOP_DEF(iemOp_eSP_Iv)
8717{
8718 IEMOP_MNEMONIC("mov rSP,IV");
8719 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSP);
8720}
8721
8722
8723/** Opcode 0xbd. */
8724FNIEMOP_DEF(iemOp_eBP_Iv)
8725{
8726 IEMOP_MNEMONIC("mov rBP,IV");
8727 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBP);
8728}
8729
8730
8731/** Opcode 0xbe. */
8732FNIEMOP_DEF(iemOp_eSI_Iv)
8733{
8734 IEMOP_MNEMONIC("mov rSI,IV");
8735 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSI);
8736}
8737
8738
8739/** Opcode 0xbf. */
8740FNIEMOP_DEF(iemOp_eDI_Iv)
8741{
8742 IEMOP_MNEMONIC("mov rDI,IV");
8743 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDI);
8744}
8745
8746
8747/** Opcode 0xc0. */
8748FNIEMOP_DEF(iemOp_Grp2_Eb_Ib)
8749{
8750 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
8751 PCIEMOPSHIFTSIZES pImpl;
8752 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
8753 {
8754 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,Ib"); break;
8755 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,Ib"); break;
8756 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,Ib"); break;
8757 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,Ib"); break;
8758 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,Ib"); break;
8759 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,Ib"); break;
8760 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,Ib"); break;
8761 case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
8762 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
8763 }
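 /* Decode example (illustrative, not from the original source): C0 E8 02 has
 ModRM=0xE8 (mod=11b, reg=101b -> shr, rm=000b -> AL) and an immediate of 2,
 i.e. "shr al,2", handled by the register path below. */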
8764 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
8765
8766 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
8767 {
8768 /* register */
8769 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
8770 IEMOP_HLP_NO_LOCK_PREFIX();
8771 IEM_MC_BEGIN(3, 0);
8772 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
8773 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
8774 IEM_MC_ARG(uint32_t *, pEFlags, 2);
8775 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
8776 IEM_MC_REF_EFLAGS(pEFlags);
8777 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
8778 IEM_MC_ADVANCE_RIP();
8779 IEM_MC_END();
8780 }
8781 else
8782 {
8783 /* memory */
8784 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
8785 IEM_MC_BEGIN(3, 2);
8786 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
8787 IEM_MC_ARG(uint8_t, cShiftArg, 1);
8788 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
8789 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
8790
8791 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
8792 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
8793 IEM_MC_ASSIGN(cShiftArg, cShift);
8794 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
8795 IEM_MC_FETCH_EFLAGS(EFlags);
8796 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
8797
8798 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
8799 IEM_MC_COMMIT_EFLAGS(EFlags);
8800 IEM_MC_ADVANCE_RIP();
8801 IEM_MC_END();
8802 }
8803 return VINF_SUCCESS;
8804}
8805
8806
8807/** Opcode 0xc1. */
8808FNIEMOP_DEF(iemOp_Grp2_Ev_Ib)
8809{
8810 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
8811 PCIEMOPSHIFTSIZES pImpl;
8812 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
8813 {
8814 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,Ib"); break;
8815 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,Ib"); break;
8816 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,Ib"); break;
8817 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,Ib"); break;
8818 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,Ib"); break;
8819 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,Ib"); break;
8820 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,Ib"); break;
8821 case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
8822 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
8823 }
8824 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
8825
8826 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
8827 {
8828 /* register */
8829 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
8830 IEMOP_HLP_NO_LOCK_PREFIX();
8831 switch (pIemCpu->enmEffOpSize)
8832 {
8833 case IEMMODE_16BIT:
8834 IEM_MC_BEGIN(3, 0);
8835 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
8836 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
8837 IEM_MC_ARG(uint32_t *, pEFlags, 2);
8838 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
8839 IEM_MC_REF_EFLAGS(pEFlags);
8840 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
8841 IEM_MC_ADVANCE_RIP();
8842 IEM_MC_END();
8843 return VINF_SUCCESS;
8844
8845 case IEMMODE_32BIT:
8846 IEM_MC_BEGIN(3, 0);
8847 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
8848 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
8849 IEM_MC_ARG(uint32_t *, pEFlags, 2);
8850 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
8851 IEM_MC_REF_EFLAGS(pEFlags);
8852 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
8853 IEM_MC_ADVANCE_RIP();
8854 IEM_MC_END();
8855 return VINF_SUCCESS;
8856
8857 case IEMMODE_64BIT:
8858 IEM_MC_BEGIN(3, 0);
8859 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
8860 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
8861 IEM_MC_ARG(uint32_t *, pEFlags, 2);
8862 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
8863 IEM_MC_REF_EFLAGS(pEFlags);
8864 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
8865 IEM_MC_ADVANCE_RIP();
8866 IEM_MC_END();
8867 return VINF_SUCCESS;
8868
8869 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8870 }
8871 }
8872 else
8873 {
8874 /* memory */
8875 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
8876 switch (pIemCpu->enmEffOpSize)
8877 {
8878 case IEMMODE_16BIT:
8879 IEM_MC_BEGIN(3, 2);
8880 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
8881 IEM_MC_ARG(uint8_t, cShiftArg, 1);
8882 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
8883 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
8884
8885 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
8886 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
8887 IEM_MC_ASSIGN(cShiftArg, cShift);
8888 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
8889 IEM_MC_FETCH_EFLAGS(EFlags);
8890 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
8891
8892 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
8893 IEM_MC_COMMIT_EFLAGS(EFlags);
8894 IEM_MC_ADVANCE_RIP();
8895 IEM_MC_END();
8896 return VINF_SUCCESS;
8897
8898 case IEMMODE_32BIT:
8899 IEM_MC_BEGIN(3, 2);
8900 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
8901 IEM_MC_ARG(uint8_t, cShiftArg, 1);
8902 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
8903 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
8904
8905 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
8906 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
8907 IEM_MC_ASSIGN(cShiftArg, cShift);
8908 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
8909 IEM_MC_FETCH_EFLAGS(EFlags);
8910 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
8911
8912 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
8913 IEM_MC_COMMIT_EFLAGS(EFlags);
8914 IEM_MC_ADVANCE_RIP();
8915 IEM_MC_END();
8916 return VINF_SUCCESS;
8917
8918 case IEMMODE_64BIT:
8919 IEM_MC_BEGIN(3, 2);
8920 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
8921 IEM_MC_ARG(uint8_t, cShiftArg, 1);
8922 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
8923 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
8924
8925 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
8926 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
8927 IEM_MC_ASSIGN(cShiftArg, cShift);
8928 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
8929 IEM_MC_FETCH_EFLAGS(EFlags);
8930 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
8931
8932 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
8933 IEM_MC_COMMIT_EFLAGS(EFlags);
8934 IEM_MC_ADVANCE_RIP();
8935 IEM_MC_END();
8936 return VINF_SUCCESS;
8937
8938 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8939 }
8940 }
8941}
8942
8943
8944/** Opcode 0xc2. */
8945FNIEMOP_DEF(iemOp_retn_Iw)
8946{
8947 IEMOP_MNEMONIC("retn Iw");
8948 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
8949 IEMOP_HLP_NO_LOCK_PREFIX();
8950 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
8951 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, u16Imm);
8952}
8953
8954
8955/** Opcode 0xc3. */
8956FNIEMOP_DEF(iemOp_retn)
8957{
8958 IEMOP_MNEMONIC("retn");
8959 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
8960 IEMOP_HLP_NO_LOCK_PREFIX();
8961 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, 0);
8962}
8963
8964
8965/** Opcode 0xc4. */
8966FNIEMOP_DEF(iemOp_les_Gv_Mp)
8967{
8968 IEMOP_MNEMONIC("les Gv,Mp");
8969 return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_ES);
8970}
8971
8972
8973/** Opcode 0xc5. */
8974FNIEMOP_DEF(iemOp_lds_Gv_Mp)
8975{
8976 IEMOP_MNEMONIC("lds Gv,Mp");
8977 return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_DS);
8978}
8979
8980
8981/** Opcode 0xc6. */
8982FNIEMOP_DEF(iemOp_Grp11_Eb_Ib)
8983{
8984 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
8985 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
8986 if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Eb,Ib in this group. */
8987 return IEMOP_RAISE_INVALID_LOCK_PREFIX();
8988 IEMOP_MNEMONIC("mov Eb,Ib");
8989
8990 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
8991 {
8992 /* register access */
8993 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
8994 IEM_MC_BEGIN(0, 0);
8995 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Imm);
8996 IEM_MC_ADVANCE_RIP();
8997 IEM_MC_END();
8998 }
8999 else
9000 {
9001 /* memory access. */
9002 IEM_MC_BEGIN(0, 1);
9003 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9004 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9005 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9006 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Imm);
9007 IEM_MC_ADVANCE_RIP();
9008 IEM_MC_END();
9009 }
9010 return VINF_SUCCESS;
9011}
9012
9013
9014/** Opcode 0xc7. */
9015FNIEMOP_DEF(iemOp_Grp11_Ev_Iz)
9016{
9017 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9018 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9019 if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Ev,Iz in this group. */
9020 return IEMOP_RAISE_INVALID_LOCK_PREFIX();
9021 IEMOP_MNEMONIC("mov Ev,Iz");
9022
9023 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9024 {
9025 /* register access */
9026 switch (pIemCpu->enmEffOpSize)
9027 {
9028 case IEMMODE_16BIT:
9029 IEM_MC_BEGIN(0, 0);
9030 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9031 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Imm);
9032 IEM_MC_ADVANCE_RIP();
9033 IEM_MC_END();
9034 return VINF_SUCCESS;
9035
9036 case IEMMODE_32BIT:
9037 IEM_MC_BEGIN(0, 0);
9038 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
9039 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Imm);
9040 IEM_MC_ADVANCE_RIP();
9041 IEM_MC_END();
9042 return VINF_SUCCESS;
9043
9044 case IEMMODE_64BIT:
9045 IEM_MC_BEGIN(0, 0);
9046 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); /* Iz is a sign-extended imm32, not imm64. */
9047 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Imm);
9048 IEM_MC_ADVANCE_RIP();
9049 IEM_MC_END();
9050 return VINF_SUCCESS;
9051
9052 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9053 }
9054 }
9055 else
9056 {
9057 /* memory access. */
9058 switch (pIemCpu->enmEffOpSize)
9059 {
9060 case IEMMODE_16BIT:
9061 IEM_MC_BEGIN(0, 1);
9062 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9063 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9064 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9065 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Imm);
9066 IEM_MC_ADVANCE_RIP();
9067 IEM_MC_END();
9068 return VINF_SUCCESS;
9069
9070 case IEMMODE_32BIT:
9071 IEM_MC_BEGIN(0, 1);
9072 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9073 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9074 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
9075 IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Imm);
9076 IEM_MC_ADVANCE_RIP();
9077 IEM_MC_END();
9078 return VINF_SUCCESS;
9079
9080 case IEMMODE_64BIT:
9081 IEM_MC_BEGIN(0, 1);
9082 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9083 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9084 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); /* Iz is a sign-extended imm32, not imm64. */
9085 IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Imm);
9086 IEM_MC_ADVANCE_RIP();
9087 IEM_MC_END();
9088 return VINF_SUCCESS;
9089
9090 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9091 }
9092 }
9093}
9094
9095
9096
9097
9098/** Opcode 0xc8. */
9099FNIEMOP_STUB(iemOp_enter_Iw_Ib);
9100
9101
9102/** Opcode 0xc9. */
9103FNIEMOP_DEF(iemOp_leave)
9104{
9105 IEMOP_MNEMONIC("retn");
9106 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9107 IEMOP_HLP_NO_LOCK_PREFIX();
9108 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_leave, pIemCpu->enmEffOpSize);
9109}
9110
9111
9112/** Opcode 0xca. */
9113FNIEMOP_DEF(iemOp_retf_Iw)
9114{
9115 IEMOP_MNEMONIC("retf Iw");
9116 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9117 IEMOP_HLP_NO_LOCK_PREFIX();
9118 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9119 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, u16Imm);
9120}
9121
9122
9123/** Opcode 0xcb. */
9124FNIEMOP_DEF(iemOp_retf)
9125{
9126 IEMOP_MNEMONIC("retf");
9127 IEMOP_HLP_NO_LOCK_PREFIX();
9128 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9129 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, 0);
9130}
9131
9132
9133/** Opcode 0xcc. */
9134FNIEMOP_DEF(iemOp_int_3)
9135{
9136 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, X86_XCPT_BP, true /*fIsBpInstr*/);
9137}
9138
9139
9140/** Opcode 0xcd. */
9141FNIEMOP_DEF(iemOp_int_Ib)
9142{
9143 uint8_t u8Int; IEM_OPCODE_GET_NEXT_U8(&u8Int);
9144 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, u8Int, false /*fIsBpInstr*/);
9145}
9146
9147
9148/** Opcode 0xce. */
9149FNIEMOP_DEF(iemOp_into)
9150{
    IEMOP_MNEMONIC("into");
    IEMOP_HLP_NO_64BIT(); /* into is invalid (#UD) in 64-bit mode. */
9151 IEM_MC_BEGIN(2, 0);
9152 IEM_MC_ARG_CONST(uint8_t, u8Int, /*=*/ X86_XCPT_OF, 0);
9153 IEM_MC_ARG_CONST(bool, fIsBpInstr, /*=*/ false, 1);
9154 IEM_MC_CALL_CIMPL_2(iemCImpl_int, u8Int, fIsBpInstr);
9155 IEM_MC_END();
9156 return VINF_SUCCESS;
9157}
9158
9159
9160/** Opcode 0xcf. */
9161FNIEMOP_DEF(iemOp_iret)
9162{
9163 IEMOP_MNEMONIC("iret");
9164 IEMOP_HLP_NO_LOCK_PREFIX();
9165 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_iret, pIemCpu->enmEffOpSize);
9166}
9167
9168
9169/** Opcode 0xd0. */
9170FNIEMOP_DEF(iemOp_Grp2_Eb_1)
9171{
9172 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9173 PCIEMOPSHIFTSIZES pImpl;
9174 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9175 {
9176 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,1"); break;
9177 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,1"); break;
9178 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,1"); break;
9179 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,1"); break;
9180 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,1"); break;
9181 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,1"); break;
9182 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,1"); break;
9183 case 6: return IEMOP_RAISE_INVALID_OPCODE();
9184 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
9185 }
9186 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9187
9188 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9189 {
9190 /* register */
9191 IEMOP_HLP_NO_LOCK_PREFIX();
9192 IEM_MC_BEGIN(3, 0);
9193 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9194 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=*/1, 1);
9195 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9196 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9197 IEM_MC_REF_EFLAGS(pEFlags);
9198 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9199 IEM_MC_ADVANCE_RIP();
9200 IEM_MC_END();
9201 }
9202 else
9203 {
9204 /* memory */
9205 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9206 IEM_MC_BEGIN(3, 2);
9207 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9208 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=*/1, 1);
9209 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9210 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9211
9212 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9213 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9214 IEM_MC_FETCH_EFLAGS(EFlags);
9215 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9216
9217 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
9218 IEM_MC_COMMIT_EFLAGS(EFlags);
9219 IEM_MC_ADVANCE_RIP();
9220 IEM_MC_END();
9221 }
9222 return VINF_SUCCESS;
9223}
9224
9225
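/*
 * Illustrative sketch only (not built): how the group handlers above carve up
 * the ModR/M byte. The helper name is made up for illustration.
 */
#if 0
static void iemExampleSplitModRm(uint8_t bRm)
{
    uint8_t const iMod = (bRm >> 6) & 3; /* 3 = register operand, 0..2 = memory forms */
    uint8_t const iReg = (bRm >> 3) & 7; /* opcode extension /0../7 for these groups */
    uint8_t const iRm  = bRm & 7;        /* register index or addressing-mode selector */
    NOREF(iMod); NOREF(iReg); NOREF(iRm);
}
#endif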
9226
9227/** Opcode 0xd1. */
9228FNIEMOP_DEF(iemOp_Grp2_Ev_1)
9229{
9230 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9231 PCIEMOPSHIFTSIZES pImpl;
9232 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9233 {
9234 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,1"); break;
9235 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,1"); break;
9236 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,1"); break;
9237 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,1"); break;
9238 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,1"); break;
9239 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,1"); break;
9240 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,1"); break;
9241 case 6: return IEMOP_RAISE_INVALID_OPCODE();
9242 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
9243 }
9244 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9245
9246 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9247 {
9248 /* register */
9249 IEMOP_HLP_NO_LOCK_PREFIX();
9250 switch (pIemCpu->enmEffOpSize)
9251 {
9252 case IEMMODE_16BIT:
9253 IEM_MC_BEGIN(3, 0);
9254 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9255 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9256 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9257 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9258 IEM_MC_REF_EFLAGS(pEFlags);
9259 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9260 IEM_MC_ADVANCE_RIP();
9261 IEM_MC_END();
9262 return VINF_SUCCESS;
9263
9264 case IEMMODE_32BIT:
9265 IEM_MC_BEGIN(3, 0);
9266 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9267 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9268 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9269 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9270 IEM_MC_REF_EFLAGS(pEFlags);
9271 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9272 IEM_MC_ADVANCE_RIP();
9273 IEM_MC_END();
9274 return VINF_SUCCESS;
9275
9276 case IEMMODE_64BIT:
9277 IEM_MC_BEGIN(3, 0);
9278 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9279 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9280 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9281 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9282 IEM_MC_REF_EFLAGS(pEFlags);
9283 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9284 IEM_MC_ADVANCE_RIP();
9285 IEM_MC_END();
9286 return VINF_SUCCESS;
9287
9288 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9289 }
9290 }
9291 else
9292 {
9293 /* memory */
9294 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9295 switch (pIemCpu->enmEffOpSize)
9296 {
9297 case IEMMODE_16BIT:
9298 IEM_MC_BEGIN(3, 2);
9299 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9300 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9301 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9302 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9303
9304 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9305 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9306 IEM_MC_FETCH_EFLAGS(EFlags);
9307 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9308
9309 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
9310 IEM_MC_COMMIT_EFLAGS(EFlags);
9311 IEM_MC_ADVANCE_RIP();
9312 IEM_MC_END();
9313 return VINF_SUCCESS;
9314
9315 case IEMMODE_32BIT:
9316 IEM_MC_BEGIN(3, 2);
9317 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9318 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9319 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9320 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9321
9322 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9323 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9324 IEM_MC_FETCH_EFLAGS(EFlags);
9325 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9326
9327 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
9328 IEM_MC_COMMIT_EFLAGS(EFlags);
9329 IEM_MC_ADVANCE_RIP();
9330 IEM_MC_END();
9331 return VINF_SUCCESS;
9332
9333 case IEMMODE_64BIT:
9334 IEM_MC_BEGIN(3, 2);
9335 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9336 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9337 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9338 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9339
9340 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9341 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9342 IEM_MC_FETCH_EFLAGS(EFlags);
9343 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9344
9345 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
9346 IEM_MC_COMMIT_EFLAGS(EFlags);
9347 IEM_MC_ADVANCE_RIP();
9348 IEM_MC_END();
9349 return VINF_SUCCESS;
9350
9351 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9352 }
9353 }
9354}
9355
9356
9357/** Opcode 0xd2. */
9358FNIEMOP_DEF(iemOp_Grp2_Eb_CL)
9359{
9360 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9361 PCIEMOPSHIFTSIZES pImpl;
9362 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9363 {
9364 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,CL"); break;
9365 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,CL"); break;
9366 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,CL"); break;
9367 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,CL"); break;
9368 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,CL"); break;
9369 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,CL"); break;
9370 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,CL"); break;
9371 case 6: return IEMOP_RAISE_INVALID_OPCODE();
9372 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc, grr. */
9373 }
9374 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9375
9376 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9377 {
9378 /* register */
9379 IEMOP_HLP_NO_LOCK_PREFIX();
9380 IEM_MC_BEGIN(3, 0);
9381 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9382 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9383 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9384 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9385 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9386 IEM_MC_REF_EFLAGS(pEFlags);
9387 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9388 IEM_MC_ADVANCE_RIP();
9389 IEM_MC_END();
9390 }
9391 else
9392 {
9393 /* memory */
9394 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9395 IEM_MC_BEGIN(3, 2);
9396 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9397 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9398 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9399 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9400
9401 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9402 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9403 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9404 IEM_MC_FETCH_EFLAGS(EFlags);
9405 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9406
9407 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
9408 IEM_MC_COMMIT_EFLAGS(EFlags);
9409 IEM_MC_ADVANCE_RIP();
9410 IEM_MC_END();
9411 }
9412 return VINF_SUCCESS;
9413}
9414
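/*
 * Note: The Eb,CL and Ev,CL handlers pass CL through unmasked; the
 * architectural masking of the shift count (modulo 32, or modulo 64 for
 * 64-bit operands) is presumably left to the pfnNormalUxx assembly workers.
 */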
9415
9416/** Opcode 0xd3. */
9417FNIEMOP_DEF(iemOp_Grp2_Ev_CL)
9418{
9419 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9420 PCIEMOPSHIFTSIZES pImpl;
9421 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9422 {
9423 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,CL"); break;
9424 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,CL"); break;
9425 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,CL"); break;
9426 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,CL"); break;
9427 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,CL"); break;
9428 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,CL"); break;
9429 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,CL"); break;
9430 case 6: return IEMOP_RAISE_INVALID_OPCODE();
9431 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
9432 }
9433 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9434
9435 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9436 {
9437 /* register */
9438 IEMOP_HLP_NO_LOCK_PREFIX();
9439 switch (pIemCpu->enmEffOpSize)
9440 {
9441 case IEMMODE_16BIT:
9442 IEM_MC_BEGIN(3, 0);
9443 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9444 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9445 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9446 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9447 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9448 IEM_MC_REF_EFLAGS(pEFlags);
9449 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9450 IEM_MC_ADVANCE_RIP();
9451 IEM_MC_END();
9452 return VINF_SUCCESS;
9453
9454 case IEMMODE_32BIT:
9455 IEM_MC_BEGIN(3, 0);
9456 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9457 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9458 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9459 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9460 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9461 IEM_MC_REF_EFLAGS(pEFlags);
9462 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9463 IEM_MC_ADVANCE_RIP();
9464 IEM_MC_END();
9465 return VINF_SUCCESS;
9466
9467 case IEMMODE_64BIT:
9468 IEM_MC_BEGIN(3, 0);
9469 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9470 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9471 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9472 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9473 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9474 IEM_MC_REF_EFLAGS(pEFlags);
9475 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9476 IEM_MC_ADVANCE_RIP();
9477 IEM_MC_END();
9478 return VINF_SUCCESS;
9479
9480 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9481 }
9482 }
9483 else
9484 {
9485 /* memory */
9486 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9487 switch (pIemCpu->enmEffOpSize)
9488 {
9489 case IEMMODE_16BIT:
9490 IEM_MC_BEGIN(3, 2);
9491 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9492 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9493 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9494 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9495
9496 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9497 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9498 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9499 IEM_MC_FETCH_EFLAGS(EFlags);
9500 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9501
9502 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
9503 IEM_MC_COMMIT_EFLAGS(EFlags);
9504 IEM_MC_ADVANCE_RIP();
9505 IEM_MC_END();
9506 return VINF_SUCCESS;
9507
9508 case IEMMODE_32BIT:
9509 IEM_MC_BEGIN(3, 2);
9510 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9511 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9512 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9513 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9514
9515 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9516 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9517 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9518 IEM_MC_FETCH_EFLAGS(EFlags);
9519 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9520
9521 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
9522 IEM_MC_COMMIT_EFLAGS(EFlags);
9523 IEM_MC_ADVANCE_RIP();
9524 IEM_MC_END();
9525 return VINF_SUCCESS;
9526
9527 case IEMMODE_64BIT:
9528 IEM_MC_BEGIN(3, 2);
9529 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9530 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9531 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9532 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9533
9534 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9535 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9536 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9537 IEM_MC_FETCH_EFLAGS(EFlags);
9538 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9539
9540 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
9541 IEM_MC_COMMIT_EFLAGS(EFlags);
9542 IEM_MC_ADVANCE_RIP();
9543 IEM_MC_END();
9544 return VINF_SUCCESS;
9545
9546 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9547 }
9548 }
9549}
9550
9551/** Opcode 0xd4. */
9552FNIEMOP_STUB(iemOp_aam_Ib);
9553/** Opcode 0xd5. */
9554FNIEMOP_STUB(iemOp_aad_Ib);
9555
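/* Note: Opcode 0xd6 (the undocumented salc) has no handler here. */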
9556
9557/** Opcode 0xd7. */
9558FNIEMOP_DEF(iemOp_xlat)
9559{
9560 IEMOP_MNEMONIC("xlat");
9561 IEMOP_HLP_NO_LOCK_PREFIX();
9562 switch (pIemCpu->enmEffAddrMode)
9563 {
9564 case IEMMODE_16BIT:
9565 IEM_MC_BEGIN(2, 0);
9566 IEM_MC_LOCAL(uint8_t, u8Tmp);
9567 IEM_MC_LOCAL(uint16_t, u16Addr);
9568 IEM_MC_FETCH_GREG_U8_ZX_U16(u16Addr, X86_GREG_xAX);
9569 IEM_MC_ADD_GREG_U16_TO_LOCAL(u16Addr, X86_GREG_xBX);
9570 IEM_MC_FETCH_MEM16_U8(u8Tmp, pIemCpu->iEffSeg, u16Addr);
9571 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
9572 IEM_MC_ADVANCE_RIP();
9573 IEM_MC_END();
9574 return VINF_SUCCESS;
9575
9576 case IEMMODE_32BIT:
9577 IEM_MC_BEGIN(2, 0);
9578 IEM_MC_LOCAL(uint8_t, u8Tmp);
9579 IEM_MC_LOCAL(uint32_t, u32Addr);
9580 IEM_MC_FETCH_GREG_U8_ZX_U32(u32Addr, X86_GREG_xAX);
9581 IEM_MC_ADD_GREG_U32_TO_LOCAL(u32Addr, X86_GREG_xBX);
9582 IEM_MC_FETCH_MEM32_U8(u8Tmp, pIemCpu->iEffSeg, u32Addr);
9583 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
9584 IEM_MC_ADVANCE_RIP();
9585 IEM_MC_END();
9586 return VINF_SUCCESS;
9587
9588 case IEMMODE_64BIT:
9589 IEM_MC_BEGIN(2, 0);
9590 IEM_MC_LOCAL(uint8_t, u8Tmp);
9591 IEM_MC_LOCAL(uint64_t, u64Addr);
9592 IEM_MC_FETCH_GREG_U8_ZX_U64(u64Addr, X86_GREG_xAX);
9593 IEM_MC_ADD_GREG_U64_TO_LOCAL(u64Addr, X86_GREG_xBX);
9594 IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, u64Addr);
9595 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
9596 IEM_MC_ADVANCE_RIP();
9597 IEM_MC_END();
9598 return VINF_SUCCESS;
9599
9600 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9601 }
9602}
9603
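/*
 * All three xlat paths above implement AL = mem8[rBX + zero-extended AL],
 * differing only in the effective address width selected by enmEffAddrMode.
 */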
9604
9605/** Opcode 0xd8. */
9606FNIEMOP_STUB(iemOp_EscF0);
9607/** Opcode 0xd9. */
9608FNIEMOP_STUB(iemOp_EscF1);
9609/** Opcode 0xda. */
9610FNIEMOP_STUB(iemOp_EscF2);
9611
9612
9613/** Opcode 0xdb /0. */
9614FNIEMOP_STUB_1(iemOp_fild_dw, uint8_t, bRm);
9615/** Opcode 0xdb /1. */
9616FNIEMOP_STUB_1(iemOp_fisttp_dw, uint8_t, bRm);
9617/** Opcode 0xdb /2. */
9618FNIEMOP_STUB_1(iemOp_fist_dw, uint8_t, bRm);
9619/** Opcode 0xdb /3. */
9620FNIEMOP_STUB_1(iemOp_fistp_dw, uint8_t, bRm);
9621/** Opcode 0xdb /5. */
9622FNIEMOP_STUB_1(iemOp_fld_xr, uint8_t, bRm);
9623/** Opcode 0xdb /7. */
9624FNIEMOP_STUB_1(iemOp_fstp_xr, uint8_t, bRm);
9625
9626
9627/** Opcode 0xdb 0xe0. */
9628FNIEMOP_DEF(iemOp_fneni)
9629{
9630 IEMOP_MNEMONIC("fneni (8087/ign)");
9631 IEM_MC_BEGIN(0,0);
9632 IEM_MC_ADVANCE_RIP();
9633 IEM_MC_END();
9634 return VINF_SUCCESS;
9635}
9636
9637
9638/** Opcode 0xdb 0xe1. */
9639FNIEMOP_DEF(iemOp_fndisi)
9640{
9641 IEMOP_MNEMONIC("fndisi (8087/ign)");
9642 IEM_MC_BEGIN(0,0);
9643 IEM_MC_ADVANCE_RIP();
9644 IEM_MC_END();
9645 return VINF_SUCCESS;
9646}
9647
9648
9649/** Opcode 0xdb 0xe2. */
9650FNIEMOP_STUB(iemOp_fnclex);
9651
9652
9653/** Opcode 0xdb 0xe3. */
9654FNIEMOP_DEF(iemOp_fninit)
9655{
9656 IEMOP_MNEMONIC("fninit");
9657 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_finit, false /*fCheckXcpts*/);
9658}
9659
9660
9661/** Opcode 0xdb 0xe4. */
9662FNIEMOP_DEF(iemOp_fnsetpm)
9663{
9664 IEMOP_MNEMONIC("fnsetpm (80287/ign)"); /* set protected mode on fpu. */
9665 IEM_MC_BEGIN(0,0);
9666 IEM_MC_ADVANCE_RIP();
9667 IEM_MC_END();
9668 return VINF_SUCCESS;
9669}
9670
9671
9672/** Opcode 0xdb 0xe5. */
9673FNIEMOP_DEF(iemOp_frstpm)
9674{
9675 IEMOP_MNEMONIC("frstpm (80287XL/ign)"); /* reset pm, back to real mode. */
9676 IEM_MC_BEGIN(0,0);
9677 IEM_MC_ADVANCE_RIP();
9678 IEM_MC_END();
9679 return VINF_SUCCESS;
9680}
9681
9682
9683/** Opcode 0xdb. */
9684FNIEMOP_DEF(iemOp_EscF3)
9685{
9686 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9687 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9688 {
9689 switch (bRm & 0xf8)
9690 {
9691 case 0xc0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcmovnb
9692 case 0xc8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcmovne
9693 case 0xd0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcmovnbe
9694 case 0xd8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcmovnu
9695 case 0xe0:
9696 IEMOP_HLP_NO_LOCK_PREFIX();
9697 switch (bRm)
9698 {
9699 case 0xe0: return FNIEMOP_CALL(iemOp_fneni);
9700 case 0xe1: return FNIEMOP_CALL(iemOp_fndisi);
9701 case 0xe2: return FNIEMOP_CALL(iemOp_fnclex);
9702 case 0xe3: return FNIEMOP_CALL(iemOp_fninit);
9703 case 0xe4: return FNIEMOP_CALL(iemOp_fnsetpm);
9704 case 0xe5: return FNIEMOP_CALL(iemOp_frstpm);
9705 default: return IEMOP_RAISE_INVALID_OPCODE();
9706 }
9707 break;
9708 case 0xe8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fucomi
9709 case 0xf0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcomi
9710 case 0xf8: return IEMOP_RAISE_INVALID_OPCODE();
9711 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9712 }
9713 }
9714 else
9715 {
9716 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9717 {
9718 case 0: return FNIEMOP_CALL_1(iemOp_fild_dw, bRm);
9719 case 1: return FNIEMOP_CALL_1(iemOp_fisttp_dw,bRm);
9720 case 2: return FNIEMOP_CALL_1(iemOp_fist_dw, bRm);
9721 case 3: return FNIEMOP_CALL_1(iemOp_fistp_dw, bRm);
9722 case 4: return IEMOP_RAISE_INVALID_OPCODE();
9723 case 5: return FNIEMOP_CALL_1(iemOp_fld_xr, bRm);
9724 case 6: return IEMOP_RAISE_INVALID_OPCODE();
9725 case 7: return FNIEMOP_CALL_1(iemOp_fstp_xr, bRm);
9726 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9727 }
9728 }
9729}
9730
9731/** Opcode 0xdc. */
9732FNIEMOP_STUB(iemOp_EscF4);
9733/** Opcode 0xdd. */
9734FNIEMOP_STUB(iemOp_EscF5);
9735
9736/** Opcode 0xde 0xd9. */
9737FNIEMOP_STUB(iemOp_fcompp);
9738
9739/** Opcode 0xde. */
9740FNIEMOP_DEF(iemOp_EscF6)
9741{
9742 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9743 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9744 {
9745 switch (bRm & 0xf8)
9746 {
9747 case 0xc0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fiaddp
9748 case 0xc8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fimulp
9749 case 0xd0: return IEMOP_RAISE_INVALID_OPCODE();
9750 case 0xd8:
9751 switch (bRm)
9752 {
9753 case 0xd9: return FNIEMOP_CALL(iemOp_fcompp);
9754 default: return IEMOP_RAISE_INVALID_OPCODE();
9755 }
9756 case 0xe0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fsubrp
9757 case 0xe8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fsubp
9758 case 0xf0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fdivrp
9759 case 0xf8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fdivp
9760 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9761 }
9762 }
9763 else
9764 {
9765#if 0
9766 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9767 {
9768 case 0: return FNIEMOP_CALL_1(iemOp_fiadd_w, bRm);
9769 case 1: return FNIEMOP_CALL_1(iemOp_fimul_w, bRm);
9770 case 2: return FNIEMOP_CALL_1(iemOp_ficom_w, bRm);
9771 case 3: return FNIEMOP_CALL_1(iemOp_ficomp_w, bRm);
9772 case 4: return FNIEMOP_CALL_1(iemOp_fisub_w, bRm);
9773 case 5: return FNIEMOP_CALL_1(iemOp_fisubr_w, bRm);
9774 case 6: return FNIEMOP_CALL_1(iemOp_fidiv_w, bRm);
9775 case 7: return FNIEMOP_CALL_1(iemOp_fidivr_w, bRm);
9776 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9777 }
9778#endif
9779 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
9780 }
9781}
9782
9783
9784/** Opcode 0xdf 0xe0. */
9785FNIEMOP_DEF(iemOp_fnstsw_ax)
9786{
9787 IEMOP_MNEMONIC("fnstsw ax");
9788 IEMOP_HLP_NO_LOCK_PREFIX();
9789
9790 IEM_MC_BEGIN(0, 1);
9791 IEM_MC_LOCAL(uint16_t, u16Tmp);
9792 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
9793 IEM_MC_FETCH_FSW(u16Tmp);
9794 IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
9795 IEM_MC_ADVANCE_RIP();
9796 IEM_MC_END();
9797 return VINF_SUCCESS;
9798}
9799
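/*
 * fnstsw ax copies the FPU status word to AX without checking for pending
 * FPU exceptions; the only guard needed is the CR0.EM/CR0.TS #NM test done
 * by IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE above.
 */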
9800
9801/** Opcode 0xdf. */
9802FNIEMOP_DEF(iemOp_EscF7)
9803{
9804 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9805 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9806 {
9807 switch (bRm & 0xf8)
9808 {
9809 case 0xc0: return IEMOP_RAISE_INVALID_OPCODE();
9810 case 0xc8: return IEMOP_RAISE_INVALID_OPCODE();
9811 case 0xd0: return IEMOP_RAISE_INVALID_OPCODE();
9812 case 0xd8: return IEMOP_RAISE_INVALID_OPCODE();
9813 case 0xe0:
9814 switch (bRm)
9815 {
9816 case 0xe0: return FNIEMOP_CALL(iemOp_fnstsw_ax);
9817 default: return IEMOP_RAISE_INVALID_OPCODE();
9818 }
9819 case 0xe8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fucomip
9820 case 0xf0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcomip
9821 case 0xf8: return IEMOP_RAISE_INVALID_OPCODE();
9822 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9823 }
9824 }
9825 else
9826 {
9827 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
9828 }
9829}
9830
9831
9832/** Opcode 0xe0. */
9833FNIEMOP_DEF(iemOp_loopne_Jb)
9834{
9835 IEMOP_MNEMONIC("loopne Jb");
9836 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
9837 IEMOP_HLP_NO_LOCK_PREFIX();
9838 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9839
9840 switch (pIemCpu->enmEffAddrMode)
9841 {
9842 case IEMMODE_16BIT:
9843 IEM_MC_BEGIN(0,0);
9844 IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
9845 IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
9846 IEM_MC_REL_JMP_S8(i8Imm);
9847 } IEM_MC_ELSE() {
9848 IEM_MC_ADVANCE_RIP();
9849 } IEM_MC_ENDIF();
9850 IEM_MC_END();
9851 return VINF_SUCCESS;
9852
9853 case IEMMODE_32BIT:
9854 IEM_MC_BEGIN(0,0);
9855 IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
9856 IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
9857 IEM_MC_REL_JMP_S8(i8Imm);
9858 } IEM_MC_ELSE() {
9859 IEM_MC_ADVANCE_RIP();
9860 } IEM_MC_ENDIF();
9861 IEM_MC_END();
9862 return VINF_SUCCESS;
9863
9864 case IEMMODE_64BIT:
9865 IEM_MC_BEGIN(0,0);
9866 IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
9867 IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
9868 IEM_MC_REL_JMP_S8(i8Imm);
9869 } IEM_MC_ELSE() {
9870 IEM_MC_ADVANCE_RIP();
9871 } IEM_MC_ENDIF();
9872 IEM_MC_END();
9873 return VINF_SUCCESS;
9874
9875 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9876 }
9877}
9878
9879
9880/** Opcode 0xe1. */
9881FNIEMOP_DEF(iemOp_loope_Jb)
9882{
9883 IEMOP_MNEMONIC("loope Jb");
9884 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
9885 IEMOP_HLP_NO_LOCK_PREFIX();
9886 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9887
9888 switch (pIemCpu->enmEffAddrMode)
9889 {
9890 case IEMMODE_16BIT:
9891 IEM_MC_BEGIN(0,0);
9892 IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
9893 IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
9894 IEM_MC_REL_JMP_S8(i8Imm);
9895 } IEM_MC_ELSE() {
9896 IEM_MC_ADVANCE_RIP();
9897 } IEM_MC_ENDIF();
9898 IEM_MC_END();
9899 return VINF_SUCCESS;
9900
9901 case IEMMODE_32BIT:
9902 IEM_MC_BEGIN(0,0);
9903 IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
9904 IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
9905 IEM_MC_REL_JMP_S8(i8Imm);
9906 } IEM_MC_ELSE() {
9907 IEM_MC_ADVANCE_RIP();
9908 } IEM_MC_ENDIF();
9909 IEM_MC_END();
9910 return VINF_SUCCESS;
9911
9912 case IEMMODE_64BIT:
9913 IEM_MC_BEGIN(0,0);
9914 IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
9915 IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
9916 IEM_MC_REL_JMP_S8(i8Imm);
9917 } IEM_MC_ELSE() {
9918 IEM_MC_ADVANCE_RIP();
9919 } IEM_MC_ENDIF();
9920 IEM_MC_END();
9921 return VINF_SUCCESS;
9922
9923 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9924 }
9925}
9926
9927
9928/** Opcode 0xe2. */
9929FNIEMOP_DEF(iemOp_loop_Jb)
9930{
9931 IEMOP_MNEMONIC("loop Jb");
9932 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
9933 IEMOP_HLP_NO_LOCK_PREFIX();
9934 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9935
9936 /** @todo Check out the #GP case if EIP < CS.Base or EIP > CS.Limit when
9937 * using the 32-bit operand size override. How can that be restarted? See
9938 * weird pseudo code in intel manual. */
9939 switch (pIemCpu->enmEffAddrMode)
9940 {
9941 case IEMMODE_16BIT:
9942 IEM_MC_BEGIN(0,0);
9943 IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
9944 IEM_MC_IF_CX_IS_NZ() {
9945 IEM_MC_REL_JMP_S8(i8Imm);
9946 } IEM_MC_ELSE() {
9947 IEM_MC_ADVANCE_RIP();
9948 } IEM_MC_ENDIF();
9949 IEM_MC_END();
9950 return VINF_SUCCESS;
9951
9952 case IEMMODE_32BIT:
9953 IEM_MC_BEGIN(0,0);
9954 IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
9955 IEM_MC_IF_ECX_IS_NZ() {
9956 IEM_MC_REL_JMP_S8(i8Imm);
9957 } IEM_MC_ELSE() {
9958 IEM_MC_ADVANCE_RIP();
9959 } IEM_MC_ENDIF();
9960 IEM_MC_END();
9961 return VINF_SUCCESS;
9962
9963 case IEMMODE_64BIT:
9964 IEM_MC_BEGIN(0,0);
9965 IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
9966 IEM_MC_IF_RCX_IS_NZ() {
9967 IEM_MC_REL_JMP_S8(i8Imm);
9968 } IEM_MC_ELSE() {
9969 IEM_MC_ADVANCE_RIP();
9970 } IEM_MC_ENDIF();
9971 IEM_MC_END();
9972 return VINF_SUCCESS;
9973
9974 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9975 }
9976}
9977
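/*
 * The loop variants above decrement rCX at the effective address size without
 * touching EFLAGS, then branch while the counter is non-zero (additionally
 * gated on ZF for loope/loopne). jecxz below tests rCX without decrementing.
 */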
9978
9979/** Opcode 0xe3. */
9980FNIEMOP_DEF(iemOp_jecxz_Jb)
9981{
9982 IEMOP_MNEMONIC("jecxz Jb");
9983 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
9984 IEMOP_HLP_NO_LOCK_PREFIX();
9985 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9986
9987 switch (pIemCpu->enmEffAddrMode)
9988 {
9989 case IEMMODE_16BIT:
9990 IEM_MC_BEGIN(0,0);
9991 IEM_MC_IF_CX_IS_NZ() {
9992 IEM_MC_ADVANCE_RIP();
9993 } IEM_MC_ELSE() {
9994 IEM_MC_REL_JMP_S8(i8Imm);
9995 } IEM_MC_ENDIF();
9996 IEM_MC_END();
9997 return VINF_SUCCESS;
9998
9999 case IEMMODE_32BIT:
10000 IEM_MC_BEGIN(0,0);
10001 IEM_MC_IF_ECX_IS_NZ() {
10002 IEM_MC_ADVANCE_RIP();
10003 } IEM_MC_ELSE() {
10004 IEM_MC_REL_JMP_S8(i8Imm);
10005 } IEM_MC_ENDIF();
10006 IEM_MC_END();
10007 return VINF_SUCCESS;
10008
10009 case IEMMODE_64BIT:
10010 IEM_MC_BEGIN(0,0);
10011 IEM_MC_IF_RCX_IS_NZ() {
10012 IEM_MC_ADVANCE_RIP();
10013 } IEM_MC_ELSE() {
10014 IEM_MC_REL_JMP_S8(i8Imm);
10015 } IEM_MC_ENDIF();
10016 IEM_MC_END();
10017 return VINF_SUCCESS;
10018
10019 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10020 }
10021}
10022
10023
10024/** Opcode 0xe4 */
10025FNIEMOP_DEF(iemOp_in_AL_Ib)
10026{
10027 IEMOP_MNEMONIC("in AL,Ib");
10028 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10029 IEMOP_HLP_NO_LOCK_PREFIX();
10030 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, 1);
10031}
10032
10033
10034/** Opcode 0xe5 */
10035FNIEMOP_DEF(iemOp_in_eAX_Ib)
10036{
10037 IEMOP_MNEMONIC("in eAX,Ib");
10038 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10039 IEMOP_HLP_NO_LOCK_PREFIX();
10040 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
10041}
10042
10043
10044/** Opcode 0xe6 */
10045FNIEMOP_DEF(iemOp_out_Ib_AL)
10046{
10047 IEMOP_MNEMONIC("out Ib,AL");
10048 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10049 IEMOP_HLP_NO_LOCK_PREFIX();
10050 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, 1);
10051}
10052
10053
10054/** Opcode 0xe7 */
10055FNIEMOP_DEF(iemOp_out_Ib_eAX)
10056{
10057 IEMOP_MNEMONIC("out Ib,eAX");
10058 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10059 IEMOP_HLP_NO_LOCK_PREFIX();
10060 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
10061}
10062
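/*
 * The 0xe4..0xe7 forms above take the port from an immediate byte and pass an
 * access width of 1, 2 or 4 bytes to the C implementation; the 0xec..0xef
 * forms further down take the port from DX instead.
 */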
10063
10064/** Opcode 0xe8. */
10065FNIEMOP_DEF(iemOp_call_Jv)
10066{
10067 IEMOP_MNEMONIC("call Jv");
10068 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10069 switch (pIemCpu->enmEffOpSize)
10070 {
10071 case IEMMODE_16BIT:
10072 {
10073 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
10074 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_16, (int32_t)u16Imm);
10075 }
10076
10077 case IEMMODE_32BIT:
10078 {
10079 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
10080 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_32, (int32_t)u32Imm);
10081 }
10082
10083 case IEMMODE_64BIT:
10084 {
10085 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
10086 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_64, u64Imm);
10087 }
10088
10089 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10090 }
10091}
10092
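/*
 * Note: call Jv defaults to a 64-bit operand size in long mode, where the
 * displacement is fetched as a sign-extended 32-bit value (there is no rel64
 * encoding).
 */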
10093
10094/** Opcode 0xe9. */
10095FNIEMOP_DEF(iemOp_jmp_Jv)
10096{
10097 IEMOP_MNEMONIC("jmp Jv");
10098 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10099 switch (pIemCpu->enmEffOpSize)
10100 {
10101 case IEMMODE_16BIT:
10102 {
10103 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
10104 IEM_MC_BEGIN(0, 0);
10105 IEM_MC_REL_JMP_S16(i16Imm);
10106 IEM_MC_END();
10107 return VINF_SUCCESS;
10108 }
10109
10110 case IEMMODE_64BIT:
10111 case IEMMODE_32BIT:
10112 {
10113 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
10114 IEM_MC_BEGIN(0, 0);
10115 IEM_MC_REL_JMP_S32(i32Imm);
10116 IEM_MC_END();
10117 return VINF_SUCCESS;
10118 }
10119
10120 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10121 }
10122}
10123
10124
10125/** Opcode 0xea. */
10126FNIEMOP_DEF(iemOp_jmp_Ap)
10127{
10128 IEMOP_MNEMONIC("jmp Ap");
10129 IEMOP_HLP_NO_64BIT();
10130
10131 /* Decode the far pointer address and pass it on to the far call C implementation. */
10132 uint32_t offSeg;
10133 if (pIemCpu->enmEffOpSize != IEMMODE_16BIT)
10134 IEM_OPCODE_GET_NEXT_U32(&offSeg);
10135 else
10136 {
10137 uint16_t offSeg16; IEM_OPCODE_GET_NEXT_U16(&offSeg16);
10138 offSeg = offSeg16;
10139 }
10140 uint16_t uSel; IEM_OPCODE_GET_NEXT_U16(&uSel);
10141 IEMOP_HLP_NO_LOCK_PREFIX();
10142 return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_FarJmp, uSel, offSeg, pIemCpu->enmEffOpSize);
10143}
10144
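/*
 * jmp Ap carries the selector:offset inline and is invalid in 64-bit mode
 * (IEMOP_HLP_NO_64BIT above); far jumps there must use the indirect FF /5
 * form.
 */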
10145
10146/** Opcode 0xeb. */
10147FNIEMOP_DEF(iemOp_jmp_Jb)
10148{
10149 IEMOP_MNEMONIC("jmp Jb");
10150 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
10151 IEMOP_HLP_NO_LOCK_PREFIX();
10152 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10153
10154 IEM_MC_BEGIN(0, 0);
10155 IEM_MC_REL_JMP_S8(i8Imm);
10156 IEM_MC_END();
10157 return VINF_SUCCESS;
10158}
10159
10160
10161/** Opcode 0xec */
10162FNIEMOP_DEF(iemOp_in_AL_DX)
10163{
10164 IEMOP_MNEMONIC("in AL,DX");
10165 IEMOP_HLP_NO_LOCK_PREFIX();
10166 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, 1);
10167}
10168
10169
10170/** Opcode 0xed */
10171FNIEMOP_DEF(iemOp_eAX_DX)
10172{
10173 IEMOP_MNEMONIC("in eAX,DX");
10174 IEMOP_HLP_NO_LOCK_PREFIX();
10175 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
10176}
10177
10178
10179/** Opcode 0xee */
10180FNIEMOP_DEF(iemOp_out_DX_AL)
10181{
10182 IEMOP_MNEMONIC("out DX,AL");
10183 IEMOP_HLP_NO_LOCK_PREFIX();
10184 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, 1);
10185}
10186
10187
10188/** Opcode 0xef */
10189FNIEMOP_DEF(iemOp_out_DX_eAX)
10190{
10191 IEMOP_MNEMONIC("out DX,eAX");
10192 IEMOP_HLP_NO_LOCK_PREFIX();
10193 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
10194}
10195
10196
10197/** Opcode 0xf0. */
10198FNIEMOP_DEF(iemOp_lock)
10199{
10200 pIemCpu->fPrefixes |= IEM_OP_PRF_LOCK;
10201
10202 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10203 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
10204}
10205
10206
10207/** Opcode 0xf2. */
10208FNIEMOP_DEF(iemOp_repne)
10209{
10210 /* This overrides any previous REPE prefix. */
10211 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPZ;
10212 pIemCpu->fPrefixes |= IEM_OP_PRF_REPNZ;
10213
10214 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10215 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
10216}
10217
10218
10219/** Opcode 0xf3. */
10220FNIEMOP_DEF(iemOp_repe)
10221{
10222 /* This overrides any previous REPNE prefix. */
10223 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPNZ;
10224 pIemCpu->fPrefixes |= IEM_OP_PRF_REPZ;
10225
10226 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10227 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
10228}
10229
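/*
 * The prefix handlers above just record the prefix in fPrefixes and then
 * re-dispatch the next byte through g_apfnOneByteMap, so any number of
 * prefixes can accumulate before the instruction byte proper.
 */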
10230
10231/** Opcode 0xf4. */
10232FNIEMOP_DEF(iemOp_hlt)
10233{
    IEMOP_MNEMONIC("hlt");
10234 IEMOP_HLP_NO_LOCK_PREFIX();
10235 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
10236}
10237
10238
10239/** Opcode 0xf5. */
10240FNIEMOP_STUB(iemOp_cmc);
10241
10242
10243/**
10244 * Common implementation of 'inc/dec/not/neg Eb'.
10245 *
10246 * @param bRm The RM byte.
10247 * @param pImpl The instruction implementation.
10248 */
10249FNIEMOP_DEF_2(iemOpCommonUnaryEb, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
10250{
10251 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10252 {
10253 /* register access */
10254 IEM_MC_BEGIN(2, 0);
10255 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
10256 IEM_MC_ARG(uint32_t *, pEFlags, 1);
10257 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10258 IEM_MC_REF_EFLAGS(pEFlags);
10259 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
10260 IEM_MC_ADVANCE_RIP();
10261 IEM_MC_END();
10262 }
10263 else
10264 {
10265 /* memory access. */
10266 IEM_MC_BEGIN(2, 2);
10267 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
10268 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
10269 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10270
10271 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10272 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10273 IEM_MC_FETCH_EFLAGS(EFlags);
10274 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
10275 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
10276 else
10277 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU8, pu8Dst, pEFlags);
10278
10279 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
10280 IEM_MC_COMMIT_EFLAGS(EFlags);
10281 IEM_MC_ADVANCE_RIP();
10282 IEM_MC_END();
10283 }
10284 return VINF_SUCCESS;
10285}
10286
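/*
 * Illustrative sketch only (not built): the read-modify-write shape the
 * memory path above expands to, in plain C. All names are made up.
 */
#if 0
static int iemExampleNotU8(uint8_t *pu8Mem, uint32_t *pfEFlags)
{
    uint8_t  u8Val   = *pu8Mem;   /* IEM_MC_MEM_MAP gives a writable mapping */
    uint32_t fEFlags = *pfEFlags; /* IEM_MC_FETCH_EFLAGS */
    u8Val = (uint8_t)~u8Val;      /* the not worker; leaves the flags alone */
    *pu8Mem   = u8Val;            /* IEM_MC_MEM_COMMIT_AND_UNMAP */
    *pfEFlags = fEFlags;          /* IEM_MC_COMMIT_EFLAGS */
    return VINF_SUCCESS;
}
#endif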
10287
10288/**
10289 * Common implementation of 'inc/dec/not/neg Ev'.
10290 *
10291 * @param bRm The RM byte.
10292 * @param pImpl The instruction implementation.
10293 */
10294FNIEMOP_DEF_2(iemOpCommonUnaryEv, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
10295{
10296 /* Registers are handled by a common worker. */
10297 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10298 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, pImpl, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10299
10300 /* Memory we do here. */
10301 switch (pIemCpu->enmEffOpSize)
10302 {
10303 case IEMMODE_16BIT:
10304 IEM_MC_BEGIN(2, 2);
10305 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
10306 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
10307 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10308
10309 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10310 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10311 IEM_MC_FETCH_EFLAGS(EFlags);
10312 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
10313 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
10314 else
10315 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU16, pu16Dst, pEFlags);
10316
10317 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
10318 IEM_MC_COMMIT_EFLAGS(EFlags);
10319 IEM_MC_ADVANCE_RIP();
10320 IEM_MC_END();
10321 return VINF_SUCCESS;
10322
10323 case IEMMODE_32BIT:
10324 IEM_MC_BEGIN(2, 2);
10325 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
10326 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
10327 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10328
10329 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10330 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10331 IEM_MC_FETCH_EFLAGS(EFlags);
10332 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
10333 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
10334 else
10335 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU32, pu32Dst, pEFlags);
10336
10337 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
10338 IEM_MC_COMMIT_EFLAGS(EFlags);
10339 IEM_MC_ADVANCE_RIP();
10340 IEM_MC_END();
10341 return VINF_SUCCESS;
10342
10343 case IEMMODE_64BIT:
10344 IEM_MC_BEGIN(2, 2);
10345 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
10346 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
10347 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10348
10349 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10350 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10351 IEM_MC_FETCH_EFLAGS(EFlags);
10352 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
10353 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
10354 else
10355 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU64, pu64Dst, pEFlags);
10356
10357 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
10358 IEM_MC_COMMIT_EFLAGS(EFlags);
10359 IEM_MC_ADVANCE_RIP();
10360 IEM_MC_END();
10361 return VINF_SUCCESS;
10362
10363 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10364 }
10365}
10366
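/*
 * Note: A LOCK prefix is architecturally meaningful for this group only with
 * a memory destination, which is why only the memory paths above select the
 * pfnLockedUxx workers when IEM_OP_PRF_LOCK is set.
 */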
10367
10368/** Opcode 0xf6 /0. */
10369FNIEMOP_DEF_1(iemOp_grp3_test_Eb, uint8_t, bRm)
10370{
10371 IEMOP_MNEMONIC("test Eb,Ib");
10372 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
10373
10374 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10375 {
10376 /* register access */
10377 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10378 IEMOP_HLP_NO_LOCK_PREFIX();
10379
10380 IEM_MC_BEGIN(3, 0);
10381 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
10382 IEM_MC_ARG_CONST(uint8_t, u8Src,/*=*/u8Imm, 1);
10383 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10384 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10385 IEM_MC_REF_EFLAGS(pEFlags);
10386 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);
10387 IEM_MC_ADVANCE_RIP();
10388 IEM_MC_END();
10389 }
10390 else
10391 {
10392 /* memory access. */
10393 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10394
10395 IEM_MC_BEGIN(3, 2);
10396 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
10397 IEM_MC_ARG(uint8_t, u8Src, 1);
10398 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
10399 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10400
10401 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10402 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10403 IEM_MC_ASSIGN(u8Src, u8Imm);
10404 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10405 IEM_MC_FETCH_EFLAGS(EFlags);
10406 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);
10407
10408 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_R);
10409 IEM_MC_COMMIT_EFLAGS(EFlags);
10410 IEM_MC_ADVANCE_RIP();
10411 IEM_MC_END();
10412 }
10413 return VINF_SUCCESS;
10414}
10415
10416
10417/** Opcode 0xf7 /0. */
10418FNIEMOP_DEF_1(iemOp_grp3_test_Ev, uint8_t, bRm)
10419{
10420 IEMOP_MNEMONIC("test Ev,Iv");
10421 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10422 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
10423
10424 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10425 {
10426 /* register access */
10427 switch (pIemCpu->enmEffOpSize)
10428 {
10429 case IEMMODE_16BIT:
10430 {
10431 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
10432 IEM_MC_BEGIN(3, 0);
10433 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
10434 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/u16Imm, 1);
10435 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10436 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10437 IEM_MC_REF_EFLAGS(pEFlags);
10438 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);
10439 IEM_MC_ADVANCE_RIP();
10440 IEM_MC_END();
10441 return VINF_SUCCESS;
10442 }
10443
10444 case IEMMODE_32BIT:
10445 {
10446 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
10447 IEM_MC_BEGIN(3, 0);
10448 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
10449 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/u32Imm, 1);
10450 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10451 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10452 IEM_MC_REF_EFLAGS(pEFlags);
10453 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);
10454 IEM_MC_ADVANCE_RIP();
10455 IEM_MC_END();
10456 return VINF_SUCCESS;
10457 }
10458
10459 case IEMMODE_64BIT:
10460 {
10461 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
10462 IEM_MC_BEGIN(3, 0);
10463 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
10464 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/u64Imm, 1);
10465 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10466 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10467 IEM_MC_REF_EFLAGS(pEFlags);
10468 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);
10469 IEM_MC_ADVANCE_RIP();
10470 IEM_MC_END();
10471 return VINF_SUCCESS;
10472 }
10473
10474 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10475 }
10476 }
10477 else
10478 {
10479 /* memory access. */
10480 switch (pIemCpu->enmEffOpSize)
10481 {
10482 case IEMMODE_16BIT:
10483 {
10484 IEM_MC_BEGIN(3, 2);
10485 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
10486 IEM_MC_ARG(uint16_t, u16Src, 1);
10487 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
10488 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10489
10490 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10491 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
10492 IEM_MC_ASSIGN(u16Src, u16Imm);
10493 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10494 IEM_MC_FETCH_EFLAGS(EFlags);
10495 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);
10496
10497 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_R);
10498 IEM_MC_COMMIT_EFLAGS(EFlags);
10499 IEM_MC_ADVANCE_RIP();
10500 IEM_MC_END();
10501 return VINF_SUCCESS;
10502 }
10503
10504 case IEMMODE_32BIT:
10505 {
10506 IEM_MC_BEGIN(3, 2);
10507 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
10508 IEM_MC_ARG(uint32_t, u32Src, 1);
10509 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
10510 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10511
10512 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10513 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
10514 IEM_MC_ASSIGN(u32Src, u32Imm);
10515 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10516 IEM_MC_FETCH_EFLAGS(EFlags);
10517 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);
10518
10519 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_R);
10520 IEM_MC_COMMIT_EFLAGS(EFlags);
10521 IEM_MC_ADVANCE_RIP();
10522 IEM_MC_END();
10523 return VINF_SUCCESS;
10524 }
10525
10526 case IEMMODE_64BIT:
10527 {
10528 IEM_MC_BEGIN(3, 2);
10529 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
10530 IEM_MC_ARG(uint64_t, u64Src, 1);
10531 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
10532 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10533
10534 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10535 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
10536 IEM_MC_ASSIGN(u64Src, u64Imm);
10537 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10538 IEM_MC_FETCH_EFLAGS(EFlags);
10539 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);
10540
10541 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_R);
10542 IEM_MC_COMMIT_EFLAGS(EFlags);
10543 IEM_MC_ADVANCE_RIP();
10544 IEM_MC_END();
10545 return VINF_SUCCESS;
10546 }
10547
10548 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10549 }
10550 }
10551}
10552
10553
10554/** Opcode 0xf6 /4, /5, /6 and /7. */
10555FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEb, uint8_t, bRm, PFNIEMAIMPLMULDIVU8, pfnU8)
10556{
10557 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10558
10559 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10560 {
10561 /* register access */
10562 IEMOP_HLP_NO_LOCK_PREFIX();
10563 IEM_MC_BEGIN(3, 0);
10564 IEM_MC_ARG(uint16_t *, pu16AX, 0);
10565 IEM_MC_ARG(uint8_t, u8Value, 1);
10566 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10567 IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10568 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
10569 IEM_MC_REF_EFLAGS(pEFlags);
10570 IEM_MC_CALL_VOID_AIMPL_3(pfnU8, pu16AX, u8Value, pEFlags);
10571 IEM_MC_ADVANCE_RIP();
10572 IEM_MC_END();
10573 }
10574 else
10575 {
10576 /* memory access. */
10577 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10578
10579 IEM_MC_BEGIN(3, 1);
10580 IEM_MC_ARG(uint16_t *, pu16AX, 0);
10581 IEM_MC_ARG(uint8_t, u8Value, 1);
10582 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10583 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10584
10585 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10586 IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
10587 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
10588 IEM_MC_REF_EFLAGS(pEFlags);
10589 IEM_MC_CALL_VOID_AIMPL_3(pfnU8, pu16AX, u8Value, pEFlags);
10590
10591 IEM_MC_ADVANCE_RIP();
10592 IEM_MC_END();
10593 }
10594 return VINF_SUCCESS;
10595}
10596
10597
10598/** Opcode 0xf7 /4, /5, /6 and /7. */
10599FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEv, uint8_t, bRm, PCIEMOPMULDIVSIZES, pImpl)
10600{
10601 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10602 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
10603
10604 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10605 {
10606 /* register access */
10607 switch (pIemCpu->enmEffOpSize)
10608 {
10609 case IEMMODE_16BIT:
10610 {
10611 IEMOP_HLP_NO_LOCK_PREFIX();
10612 IEM_MC_BEGIN(4, 1);
10613 IEM_MC_ARG(uint16_t *, pu16AX, 0);
10614 IEM_MC_ARG(uint16_t *, pu16DX, 1);
10615 IEM_MC_ARG(uint16_t, u16Value, 2);
10616 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10617 IEM_MC_LOCAL(int32_t, rc);
10618
10619 IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10620 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
10621 IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
10622 IEM_MC_REF_EFLAGS(pEFlags);
10623 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
10624 IEM_MC_IF_LOCAL_IS_Z(rc) {
10625 IEM_MC_ADVANCE_RIP();
10626 } IEM_MC_ELSE() {
10627 IEM_MC_RAISE_DIVIDE_ERROR();
10628 } IEM_MC_ENDIF();
10629
10630 IEM_MC_END();
10631 return VINF_SUCCESS;
10632 }
10633
10634 case IEMMODE_32BIT:
10635 {
10636 IEMOP_HLP_NO_LOCK_PREFIX();
10637 IEM_MC_BEGIN(4, 1);
10638 IEM_MC_ARG(uint32_t *, pu32AX, 0);
10639 IEM_MC_ARG(uint32_t *, pu32DX, 1);
10640 IEM_MC_ARG(uint32_t, u32Value, 2);
10641 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10642 IEM_MC_LOCAL(int32_t, rc);
10643
10644 IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10645 IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
10646 IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
10647 IEM_MC_REF_EFLAGS(pEFlags);
10648 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
10649 IEM_MC_IF_LOCAL_IS_Z(rc) {
10650 IEM_MC_ADVANCE_RIP();
10651 } IEM_MC_ELSE() {
10652 IEM_MC_RAISE_DIVIDE_ERROR();
10653 } IEM_MC_ENDIF();
10654
10655 IEM_MC_END();
10656 return VINF_SUCCESS;
10657 }
10658
10659 case IEMMODE_64BIT:
10660 {
10661 IEMOP_HLP_NO_LOCK_PREFIX();
10662 IEM_MC_BEGIN(4, 1);
10663 IEM_MC_ARG(uint64_t *, pu64AX, 0);
10664 IEM_MC_ARG(uint64_t *, pu64DX, 1);
10665 IEM_MC_ARG(uint64_t, u64Value, 2);
10666 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10667 IEM_MC_LOCAL(int32_t, rc);
10668
10669 IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10670 IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
10671 IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
10672 IEM_MC_REF_EFLAGS(pEFlags);
10673 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
10674 IEM_MC_IF_LOCAL_IS_Z(rc) {
10675 IEM_MC_ADVANCE_RIP();
10676 } IEM_MC_ELSE() {
10677 IEM_MC_RAISE_DIVIDE_ERROR();
10678 } IEM_MC_ENDIF();
10679
10680 IEM_MC_END();
10681 return VINF_SUCCESS;
10682 }
10683
10684 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10685 }
10686 }
10687 else
10688 {
10689 /* memory access. */
10690 switch (pIemCpu->enmEffOpSize)
10691 {
10692 case IEMMODE_16BIT:
10693 {
10694 IEMOP_HLP_NO_LOCK_PREFIX();
10695 IEM_MC_BEGIN(4, 2);
10696 IEM_MC_ARG(uint16_t *, pu16AX, 0);
10697 IEM_MC_ARG(uint16_t *, pu16DX, 1);
10698 IEM_MC_ARG(uint16_t, u16Value, 2);
10699 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10700 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10701 IEM_MC_LOCAL(int32_t, rc);
10702
10703 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10704 IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
10705 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
10706 IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
10707 IEM_MC_REF_EFLAGS(pEFlags);
10708 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
10709 IEM_MC_IF_LOCAL_IS_Z(rc) {
10710 IEM_MC_ADVANCE_RIP();
10711 } IEM_MC_ELSE() {
10712 IEM_MC_RAISE_DIVIDE_ERROR();
10713 } IEM_MC_ENDIF();
10714
10715 IEM_MC_END();
10716 return VINF_SUCCESS;
10717 }
10718
10719 case IEMMODE_32BIT:
10720 {
10721 IEMOP_HLP_NO_LOCK_PREFIX();
10722 IEM_MC_BEGIN(4, 2);
10723 IEM_MC_ARG(uint32_t *, pu32AX, 0);
10724 IEM_MC_ARG(uint32_t *, pu32DX, 1);
10725 IEM_MC_ARG(uint32_t, u32Value, 2);
10726 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10727 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10728 IEM_MC_LOCAL(int32_t, rc);
10729
10730 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10731 IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
10732 IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
10733 IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
10734 IEM_MC_REF_EFLAGS(pEFlags);
10735 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
10736 IEM_MC_IF_LOCAL_IS_Z(rc) {
10737 IEM_MC_ADVANCE_RIP();
10738 } IEM_MC_ELSE() {
10739 IEM_MC_RAISE_DIVIDE_ERROR();
10740 } IEM_MC_ENDIF();
10741
10742 IEM_MC_END();
10743 return VINF_SUCCESS;
10744 }
10745
10746 case IEMMODE_64BIT:
10747 {
10748 IEMOP_HLP_NO_LOCK_PREFIX();
10749 IEM_MC_BEGIN(4, 2);
10750 IEM_MC_ARG(uint64_t *, pu64AX, 0);
10751 IEM_MC_ARG(uint64_t *, pu64DX, 1);
10752 IEM_MC_ARG(uint64_t, u64Value, 2);
10753 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10754 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10755 IEM_MC_LOCAL(int32_t, rc);
10756
10757 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10758 IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
10759 IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
10760 IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
10761 IEM_MC_REF_EFLAGS(pEFlags);
10762 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
10763 IEM_MC_IF_LOCAL_IS_Z(rc) {
10764 IEM_MC_ADVANCE_RIP();
10765 } IEM_MC_ELSE() {
10766 IEM_MC_RAISE_DIVIDE_ERROR();
10767 } IEM_MC_ENDIF();
10768
10769 IEM_MC_END();
10770 return VINF_SUCCESS;
10771 }
10772
10773 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10774 }
10775 }
10776}
10777
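/*
 * The workers report failure through their return code (only div/idiv can
 * actually fail, on divide-by-zero or quotient overflow); a non-zero status
 * is turned into a #DE by the IEM_MC_IF_LOCAL_IS_Z blocks above.
 */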
10778/** Opcode 0xf6. */
10779FNIEMOP_DEF(iemOp_Grp3_Eb)
10780{
10781 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10782 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
10783 {
10784 case 0:
10785 return FNIEMOP_CALL_1(iemOp_grp3_test_Eb, bRm);
10786 case 1:
10787 return IEMOP_RAISE_INVALID_OPCODE();
10788 case 2:
10789 IEMOP_MNEMONIC("not Eb");
10790 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_not);
10791 case 3:
10792 IEMOP_MNEMONIC("neg Eb");
10793 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_neg);
10794 case 4:
10795 IEMOP_MNEMONIC("mul Eb");
10796 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
10797 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_mul_u8);
10798 case 5:
10799 IEMOP_MNEMONIC("imul Eb");
10800 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
10801 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_imul_u8);
10802 case 6:
10803 IEMOP_MNEMONIC("div Eb");
10804 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
10805 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_div_u8);
10806 case 7:
10807 IEMOP_MNEMONIC("idiv Eb");
10808 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
10809 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_idiv_u8);
10810 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10811 }
10812}
10813
10814
10815/** Opcode 0xf7. */
10816FNIEMOP_DEF(iemOp_Grp3_Ev)
10817{
10818 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10819 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
10820 {
10821 case 0:
10822 return FNIEMOP_CALL_1(iemOp_grp3_test_Ev, bRm);
10823 case 1:
10824 return IEMOP_RAISE_INVALID_OPCODE();
10825 case 2:
10826 IEMOP_MNEMONIC("not Ev");
10827 return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_not);
10828 case 3:
10829 IEMOP_MNEMONIC("neg Ev");
10830 return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_neg);
10831 case 4:
10832 IEMOP_MNEMONIC("mul Ev");
10833 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
10834 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_mul);
10835 case 5:
10836 IEMOP_MNEMONIC("imul Ev");
10837 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
10838 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_imul);
10839 case 6:
10840 IEMOP_MNEMONIC("div Ev");
10841 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
10842 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_div);
10843 case 7:
10844 IEMOP_MNEMONIC("idiv Ev");
10845 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
10846 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_idiv);
10847 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10848 }
10849}
10850
10851
10852/** Opcode 0xf8. */
10853FNIEMOP_DEF(iemOp_clc)
10854{
10855 IEMOP_MNEMONIC("clc");
10856 IEMOP_HLP_NO_LOCK_PREFIX();
10857 IEM_MC_BEGIN(0, 0);
10858 IEM_MC_CLEAR_EFL_BIT(X86_EFL_CF);
10859 IEM_MC_ADVANCE_RIP();
10860 IEM_MC_END();
10861 return VINF_SUCCESS;
10862}
10863
10864
10865/** Opcode 0xf9. */
10866FNIEMOP_DEF(iemOp_stc)
10867{
10868 IEMOP_MNEMONIC("stc");
10869 IEMOP_HLP_NO_LOCK_PREFIX();
10870 IEM_MC_BEGIN(0, 0);
10871 IEM_MC_SET_EFL_BIT(X86_EFL_CF);
10872 IEM_MC_ADVANCE_RIP();
10873 IEM_MC_END();
10874 return VINF_SUCCESS;
10875}
10876
10877
10878/** Opcode 0xfa. */
10879FNIEMOP_DEF(iemOp_cli)
10880{
10881 IEMOP_MNEMONIC("cli");
10882 IEMOP_HLP_NO_LOCK_PREFIX();
10883 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cli);
10884}
10885
10886
/** Opcode 0xfb. */
10887FNIEMOP_DEF(iemOp_sti)
10888{
10889 IEMOP_MNEMONIC("sti");
10890 IEMOP_HLP_NO_LOCK_PREFIX();
10891 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sti);
10892}
10893
10894
10895/** Opcode 0xfc. */
10896FNIEMOP_DEF(iemOp_cld)
10897{
10898 IEMOP_MNEMONIC("cld");
10899 IEMOP_HLP_NO_LOCK_PREFIX();
10900 IEM_MC_BEGIN(0, 0);
10901 IEM_MC_CLEAR_EFL_BIT(X86_EFL_DF);
10902 IEM_MC_ADVANCE_RIP();
10903 IEM_MC_END();
10904 return VINF_SUCCESS;
10905}
10906
10907
10908/** Opcode 0xfd. */
10909FNIEMOP_DEF(iemOp_std)
10910{
10911 IEMOP_MNEMONIC("std");
10912 IEMOP_HLP_NO_LOCK_PREFIX();
10913 IEM_MC_BEGIN(0, 0);
10914 IEM_MC_SET_EFL_BIT(X86_EFL_DF);
10915 IEM_MC_ADVANCE_RIP();
10916 IEM_MC_END();
10917 return VINF_SUCCESS;
10918}
10919
10920
10921/** Opcode 0xfe. */
10922FNIEMOP_DEF(iemOp_Grp4)
10923{
10924 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10925 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
10926 {
10927 case 0:
10928 IEMOP_MNEMONIC("inc Eb");
10929 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_inc);
10930 case 1:
10931 IEMOP_MNEMONIC("dec Eb");
10932 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_dec);
10933 default:
10934 IEMOP_MNEMONIC("grp4-ud");
10935 return IEMOP_RAISE_INVALID_OPCODE();
10936 }
10937}
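/* Note: opcode 0xfe only defines /0 (inc Eb) and /1 (dec Eb); the remaining
   encodings raise #UD, unlike group 5 (0xff) below which defines /0../6. */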
10938
10939
10940/**
10941 * Opcode 0xff /2.
10942 * @param bRm The RM byte.
10943 */
10944FNIEMOP_DEF_1(iemOp_Grp5_calln_Ev, uint8_t, bRm)
10945{
10946 IEMOP_MNEMONIC("calln Ev");
10947 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
10948 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10949
10950 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10951 {
10952 /* The new RIP is taken from a register. */
10953 switch (pIemCpu->enmEffOpSize)
10954 {
10955 case IEMMODE_16BIT:
10956 IEM_MC_BEGIN(1, 0);
10957 IEM_MC_ARG(uint16_t, u16Target, 0);
10958 IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10959 IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
10960 IEM_MC_END();
10961 return VINF_SUCCESS;
10962
10963 case IEMMODE_32BIT:
10964 IEM_MC_BEGIN(1, 0);
10965 IEM_MC_ARG(uint32_t, u32Target, 0);
10966 IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10967 IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
10968 IEM_MC_END();
10969 return VINF_SUCCESS;
10970
10971 case IEMMODE_64BIT:
10972 IEM_MC_BEGIN(1, 0);
10973 IEM_MC_ARG(uint64_t, u64Target, 0);
10974 IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10975 IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
10976 IEM_MC_END();
10977 return VINF_SUCCESS;
10978
10979 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10980 }
10981 }
10982 else
10983 {
10984 /* The new RIP is taken from memory. */
10985 switch (pIemCpu->enmEffOpSize)
10986 {
10987 case IEMMODE_16BIT:
10988 IEM_MC_BEGIN(1, 1);
10989 IEM_MC_ARG(uint16_t, u16Target, 0);
10990 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
10991 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
10992 IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
10993 IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
10994 IEM_MC_END();
10995 return VINF_SUCCESS;
10996
10997 case IEMMODE_32BIT:
10998 IEM_MC_BEGIN(1, 1);
10999 IEM_MC_ARG(uint32_t, u32Target, 0);
11000 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11001 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11002 IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11003 IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
11004 IEM_MC_END();
11005 return VINF_SUCCESS;
11006
11007 case IEMMODE_64BIT:
11008 IEM_MC_BEGIN(1, 1);
11009 IEM_MC_ARG(uint64_t, u64Target, 0);
11010 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11011 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11012 IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11013 IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
11014 IEM_MC_END();
11015 return VINF_SUCCESS;
11016
11017 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11018 }
11019 }
11020}
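/* Note on the blocks above: IEM_MC_BEGIN(cArgs, cLocals) opens a microcode
   block, IEM_MC_ARG declares a positional argument, and IEM_MC_CALL_CIMPL_1
   hands it to the C implementation. The CIMPL worker updates RIP itself,
   which is why there is no IEM_MC_ADVANCE_RIP here, unlike push Ev below. */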
11021
11022
11023/**
11024 * Opcode 0xff /3.
11025 * @param bRm The RM byte.
11026 */
11027FNIEMOP_DEF_1(iemOp_Grp5_callf_Ep, uint8_t, bRm)
11028{
11029 IEMOP_MNEMONIC("callf Ep");
11030 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
11031
11032 /* Register operand? There is no register encoding for a far pointer. */
11033 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11034 {
11035 /** @todo The SDM lists no register form for callf Ep (Ep implies a
11036 *        memory operand), so mod=11b should presumably raise \#UD. */
11037 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
11038 }
11039
11040 /* Far pointer loaded from memory. */
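    /* The operand is an m16:16, m16:32 or m16:64 far pointer stored
       offset-first, which is why the 16-bit selector is fetched at
       displacement 2, 4 or 8 below:

            GCPtrEffSrc + 0:          offSeg (2, 4 or 8 bytes)
            GCPtrEffSrc + 2/4/8:      u16Sel (2 bytes)
     */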
11041 switch (pIemCpu->enmEffOpSize)
11042 {
11043 case IEMMODE_16BIT:
11044 IEM_MC_BEGIN(3, 1);
11045 IEM_MC_ARG(uint16_t, u16Sel, 0);
11046 IEM_MC_ARG(uint16_t, offSeg, 1);
11047 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
11048 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11049 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11050 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11051 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 2);
11052 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
11053 IEM_MC_END();
11054 return VINF_SUCCESS;
11055
11056 case IEMMODE_32BIT:
11057 IEM_MC_BEGIN(3, 1);
11058 IEM_MC_ARG(uint16_t, u16Sel, 0);
11059 IEM_MC_ARG(uint32_t, offSeg, 1);
11060 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_32BIT, 2);
11061 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11062 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11063 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11064 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 4);
11065 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
11066 IEM_MC_END();
11067 return VINF_SUCCESS;
11068
11069 case IEMMODE_64BIT:
11070 IEM_MC_BEGIN(3, 1);
11071 IEM_MC_ARG(uint16_t, u16Sel, 0);
11072 IEM_MC_ARG(uint64_t, offSeg, 1);
11073 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_64BIT, 2);
11074 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11075 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11076 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11077 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 8);
11078 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
11079 IEM_MC_END();
11080 return VINF_SUCCESS;
11081
11082 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11083 }
11084}
11085
11086
11087/**
11088 * Opcode 0xff /4.
11089 * @param bRm The RM byte.
11090 */
11091FNIEMOP_DEF_1(iemOp_Grp5_jmpn_Ev, uint8_t, bRm)
11092{
11093 IEMOP_MNEMONIC("jmpn Ev");
11094 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
11095 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
11096
11097 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11098 {
11099 /* The new RIP is taken from a register. */
11100 switch (pIemCpu->enmEffOpSize)
11101 {
11102 case IEMMODE_16BIT:
11103 IEM_MC_BEGIN(0, 1);
11104 IEM_MC_LOCAL(uint16_t, u16Target);
11105 IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11106 IEM_MC_SET_RIP_U16(u16Target);
11107 IEM_MC_END();
11108 return VINF_SUCCESS;
11109
11110 case IEMMODE_32BIT:
11111 IEM_MC_BEGIN(0, 1);
11112 IEM_MC_LOCAL(uint32_t, u32Target);
11113 IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11114 IEM_MC_SET_RIP_U32(u32Target);
11115 IEM_MC_END();
11116 return VINF_SUCCESS;
11117
11118 case IEMMODE_64BIT:
11119 IEM_MC_BEGIN(0, 1);
11120 IEM_MC_LOCAL(uint64_t, u64Target);
11121 IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11122 IEM_MC_SET_RIP_U64(u64Target);
11123 IEM_MC_END();
11124 return VINF_SUCCESS;
11125
11126 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11127 }
11128 }
11129 else
11130 {
11131 /* The new RIP is taken from memory. */
11132 switch (pIemCpu->enmEffOpSize)
11133 {
11134 case IEMMODE_16BIT:
11135 IEM_MC_BEGIN(0, 2);
11136 IEM_MC_LOCAL(uint16_t, u16Target);
11137 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11138 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11139 IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11140 IEM_MC_SET_RIP_U16(u16Target);
11141 IEM_MC_END();
11142 return VINF_SUCCESS;
11143
11144 case IEMMODE_32BIT:
11145 IEM_MC_BEGIN(0, 2);
11146 IEM_MC_LOCAL(uint32_t, u32Target);
11147 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11148 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11149 IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11150 IEM_MC_SET_RIP_U32(u32Target);
11151 IEM_MC_END();
11152 return VINF_SUCCESS;
11153
11154 case IEMMODE_64BIT:
11155 IEM_MC_BEGIN(0, 2);
11156 IEM_MC_LOCAL(uint64_t, u64Target);
11157 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11158 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11159 IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11160 IEM_MC_SET_RIP_U64(u64Target);
11161 IEM_MC_END();
11162 return VINF_SUCCESS;
11163
11164 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11165 }
11166 }
11167}
11168
11169
11170/**
11171 * Opcode 0xff /5.
11172 * @param bRm The RM byte.
11173 */
11174FNIEMOP_DEF_1(iemOp_Grp5_jmpf_Ep, uint8_t, bRm)
11175{
11176 IEMOP_MNEMONIC("jmpf Ep");
11177 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
11178 /** @todo could share all the decoding with iemOp_Grp5_callf_Ep. */
11179
11180 /* Decode the far pointer address and pass it on to the far jump C
11181 implementation. */
11182 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11183 {
11184 /** @todo The SDM lists no register form for jmpf Ep (Ep implies a
11185 *        memory operand), so mod=11b should presumably raise \#UD. */
11186 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
11187 }
11188
11189 /* Far pointer loaded from memory. */
11190 switch (pIemCpu->enmEffOpSize)
11191 {
11192 case IEMMODE_16BIT:
11193 IEM_MC_BEGIN(3, 1);
11194 IEM_MC_ARG(uint16_t, u16Sel, 0);
11195 IEM_MC_ARG(uint16_t, offSeg, 1);
11196 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
11197 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11198 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11199 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11200 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 2);
11201 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
11202 IEM_MC_END();
11203 return VINF_SUCCESS;
11204
11205 case IEMMODE_32BIT:
11206 IEM_MC_BEGIN(3, 1);
11207 IEM_MC_ARG(uint16_t, u16Sel, 0);
11208 IEM_MC_ARG(uint32_t, offSeg, 1);
11209 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_32BIT, 2);
11210 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11211 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11212 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11213 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 4);
11214 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
11215 IEM_MC_END();
11216 return VINF_SUCCESS;
11217
11218 case IEMMODE_64BIT:
11219 IEM_MC_BEGIN(3, 1);
11220 IEM_MC_ARG(uint16_t, u16Sel, 0);
11221 IEM_MC_ARG(uint64_t, offSeg, 1);
11222 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_64BIT, 2);
11223 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11224 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11225 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11226 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 8);
11227 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
11228 IEM_MC_END();
11229 return VINF_SUCCESS;
11230
11231 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11232 }
11233}
11234
11235
11236/**
11237 * Opcode 0xff /6.
11238 * @param bRm The RM byte.
11239 */
11240FNIEMOP_DEF_1(iemOp_Grp5_push_Ev, uint8_t, bRm)
11241{
11242 IEMOP_MNEMONIC("push Ev");
11243 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
11244
11245 /* Registers are handled by a common worker. */
11246 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11247 return FNIEMOP_CALL_1(iemOpCommonPushGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11248
11249 /* The memory operand is handled here. */
11250 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
11251 switch (pIemCpu->enmEffOpSize)
11252 {
11253 case IEMMODE_16BIT:
11254 IEM_MC_BEGIN(0, 2);
11255 IEM_MC_LOCAL(uint16_t, u16Src);
11256 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11257 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11258 IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffSrc);
11259 IEM_MC_PUSH_U16(u16Src);
11260 IEM_MC_ADVANCE_RIP();
11261 IEM_MC_END();
11262 return VINF_SUCCESS;
11263
11264 case IEMMODE_32BIT:
11265 IEM_MC_BEGIN(0, 2);
11266 IEM_MC_LOCAL(uint32_t, u32Src);
11267 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11268 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11269 IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffSrc);
11270 IEM_MC_PUSH_U32(u32Src);
11271 IEM_MC_ADVANCE_RIP();
11272 IEM_MC_END();
11273 return VINF_SUCCESS;
11274
11275 case IEMMODE_64BIT:
11276 IEM_MC_BEGIN(0, 2);
11277 IEM_MC_LOCAL(uint64_t, u64Src);
11278 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11279 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11280 IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffSrc);
11281 IEM_MC_PUSH_U64(u64Src);
11282 IEM_MC_ADVANCE_RIP();
11283 IEM_MC_END();
11284 return VINF_SUCCESS;
11285 }
11286 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
11287}
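/* Note: push Ev defaults to the 64-bit operand size in 64-bit mode (see
   IEMOP_HLP_DEFAULT_64BIT_OP_SIZE above), so the IEMMODE_32BIT case is only
   reachable from 16-bit and 32-bit code, and a 66h prefix selects the
   16-bit variant. */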
11288
11289
11290/** Opcode 0xff. */
11291FNIEMOP_DEF(iemOp_Grp5)
11292{
11293 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
11294 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
11295 {
11296 case 0:
11297 IEMOP_MNEMONIC("inc Ev");
11298 return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_inc);
11299 case 1:
11300 IEMOP_MNEMONIC("dec Ev");
11301 return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_dec);
11302 case 2:
11303 return FNIEMOP_CALL_1(iemOp_Grp5_calln_Ev, bRm);
11304 case 3:
11305 return FNIEMOP_CALL_1(iemOp_Grp5_callf_Ep, bRm);
11306 case 4:
11307 return FNIEMOP_CALL_1(iemOp_Grp5_jmpn_Ev, bRm);
11308 case 5:
11309 return FNIEMOP_CALL_1(iemOp_Grp5_jmpf_Ep, bRm);
11310 case 6:
11311 return FNIEMOP_CALL_1(iemOp_Grp5_push_Ev, bRm);
11312 case 7:
11313 IEMOP_MNEMONIC("grp5-ud");
11314 return IEMOP_RAISE_INVALID_OPCODE();
11315 }
11316 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
11317}
11318
11319
11320
11321const PFNIEMOP g_apfnOneByteMap[256] =
11322{
11323 /* 0x00 */ iemOp_add_Eb_Gb, iemOp_add_Ev_Gv, iemOp_add_Gb_Eb, iemOp_add_Gv_Ev,
11324 /* 0x04 */ iemOp_add_Al_Ib, iemOp_add_eAX_Iz, iemOp_push_ES, iemOp_pop_ES,
11325 /* 0x08 */ iemOp_or_Eb_Gb, iemOp_or_Ev_Gv, iemOp_or_Gb_Eb, iemOp_or_Gv_Ev,
11326 /* 0x0c */ iemOp_or_Al_Ib, iemOp_or_eAX_Iz, iemOp_push_CS, iemOp_2byteEscape,
11327 /* 0x10 */ iemOp_adc_Eb_Gb, iemOp_adc_Ev_Gv, iemOp_adc_Gb_Eb, iemOp_adc_Gv_Ev,
11328 /* 0x14 */ iemOp_adc_Al_Ib, iemOp_adc_eAX_Iz, iemOp_push_SS, iemOp_pop_SS,
11329 /* 0x18 */ iemOp_sbb_Eb_Gb, iemOp_sbb_Ev_Gv, iemOp_sbb_Gb_Eb, iemOp_sbb_Gv_Ev,
11330 /* 0x1c */ iemOp_sbb_Al_Ib, iemOp_sbb_eAX_Iz, iemOp_push_DS, iemOp_pop_DS,
11331 /* 0x20 */ iemOp_and_Eb_Gb, iemOp_and_Ev_Gv, iemOp_and_Gb_Eb, iemOp_and_Gv_Ev,
11332 /* 0x24 */ iemOp_and_Al_Ib, iemOp_and_eAX_Iz, iemOp_seg_ES, iemOp_daa,
11333 /* 0x28 */ iemOp_sub_Eb_Gb, iemOp_sub_Ev_Gv, iemOp_sub_Gb_Eb, iemOp_sub_Gv_Ev,
11334 /* 0x2c */ iemOp_sub_Al_Ib, iemOp_sub_eAX_Iz, iemOp_seg_CS, iemOp_das,
11335 /* 0x30 */ iemOp_xor_Eb_Gb, iemOp_xor_Ev_Gv, iemOp_xor_Gb_Eb, iemOp_xor_Gv_Ev,
11336 /* 0x34 */ iemOp_xor_Al_Ib, iemOp_xor_eAX_Iz, iemOp_seg_SS, iemOp_aaa,
11337 /* 0x38 */ iemOp_cmp_Eb_Gb, iemOp_cmp_Ev_Gv, iemOp_cmp_Gb_Eb, iemOp_cmp_Gv_Ev,
11338 /* 0x3c */ iemOp_cmp_Al_Ib, iemOp_cmp_eAX_Iz, iemOp_seg_DS, iemOp_aas,
11339 /* 0x40 */ iemOp_inc_eAX, iemOp_inc_eCX, iemOp_inc_eDX, iemOp_inc_eBX,
11340 /* 0x44 */ iemOp_inc_eSP, iemOp_inc_eBP, iemOp_inc_eSI, iemOp_inc_eDI,
11341 /* 0x48 */ iemOp_dec_eAX, iemOp_dec_eCX, iemOp_dec_eDX, iemOp_dec_eBX,
11342 /* 0x4c */ iemOp_dec_eSP, iemOp_dec_eBP, iemOp_dec_eSI, iemOp_dec_eDI,
11343 /* 0x50 */ iemOp_push_eAX, iemOp_push_eCX, iemOp_push_eDX, iemOp_push_eBX,
11344 /* 0x54 */ iemOp_push_eSP, iemOp_push_eBP, iemOp_push_eSI, iemOp_push_eDI,
11345 /* 0x58 */ iemOp_pop_eAX, iemOp_pop_eCX, iemOp_pop_eDX, iemOp_pop_eBX,
11346 /* 0x5c */ iemOp_pop_eSP, iemOp_pop_eBP, iemOp_pop_eSI, iemOp_pop_eDI,
11347 /* 0x60 */ iemOp_pusha, iemOp_popa, iemOp_bound_Gv_Ma, iemOp_arpl_Ew_Gw,
11348 /* 0x64 */ iemOp_seg_FS, iemOp_seg_GS, iemOp_op_size, iemOp_addr_size,
11349 /* 0x68 */ iemOp_push_Iz, iemOp_imul_Gv_Ev_Iz, iemOp_push_Ib, iemOp_imul_Gv_Ev_Ib,
11350 /* 0x6c */ iemOp_insb_Yb_DX, iemOp_inswd_Yv_DX, iemOp_outsb_Yb_DX, iemOp_outswd_Yv_DX,
11351 /* 0x70 */ iemOp_jo_Jb, iemOp_jno_Jb, iemOp_jc_Jb, iemOp_jnc_Jb,
11352 /* 0x74 */ iemOp_je_Jb, iemOp_jne_Jb, iemOp_jbe_Jb, iemOp_jnbe_Jb,
11353 /* 0x78 */ iemOp_js_Jb, iemOp_jns_Jb, iemOp_jp_Jb, iemOp_jnp_Jb,
11354 /* 0x7c */ iemOp_jl_Jb, iemOp_jnl_Jb, iemOp_jle_Jb, iemOp_jnle_Jb,
11355 /* 0x80 */ iemOp_Grp1_Eb_Ib_80, iemOp_Grp1_Ev_Iz, iemOp_Grp1_Eb_Ib_82, iemOp_Grp1_Ev_Ib,
11356 /* 0x84 */ iemOp_test_Eb_Gb, iemOp_test_Ev_Gv, iemOp_xchg_Eb_Gb, iemOp_xchg_Ev_Gv,
11357 /* 0x88 */ iemOp_mov_Eb_Gb, iemOp_mov_Ev_Gv, iemOp_mov_Gb_Eb, iemOp_mov_Gv_Ev,
11358 /* 0x8c */ iemOp_mov_Ev_Sw, iemOp_lea_Gv_M, iemOp_mov_Sw_Ev, iemOp_pop_Ev,
11359 /* 0x90 */ iemOp_nop, iemOp_xchg_eCX_eAX, iemOp_xchg_eDX_eAX, iemOp_xchg_eBX_eAX,
11360 /* 0x94 */ iemOp_xchg_eSP_eAX, iemOp_xchg_eBP_eAX, iemOp_xchg_eSI_eAX, iemOp_xchg_eDI_eAX,
11361 /* 0x98 */ iemOp_cbw, iemOp_cwd, iemOp_call_Ap, iemOp_wait,
11362 /* 0x9c */ iemOp_pushf_Fv, iemOp_popf_Fv, iemOp_sahf, iemOp_lahf,
11363 /* 0xa0 */ iemOp_mov_Al_Ob, iemOp_mov_rAX_Ov, iemOp_mov_Ob_AL, iemOp_mov_Ov_rAX,
11364 /* 0xa4 */ iemOp_movsb_Xb_Yb, iemOp_movswd_Xv_Yv, iemOp_cmpsb_Xb_Yb, iemOp_cmpswd_Xv_Yv,
11365 /* 0xa8 */ iemOp_test_AL_Ib, iemOp_test_eAX_Iz, iemOp_stosb_Yb_AL, iemOp_stoswd_Yv_eAX,
11366 /* 0xac */ iemOp_lodsb_AL_Xb, iemOp_lodswd_eAX_Xv, iemOp_scasb_AL_Xb, iemOp_scaswd_eAX_Xv,
11367 /* 0xb0 */ iemOp_mov_AL_Ib, iemOp_CL_Ib, iemOp_DL_Ib, iemOp_BL_Ib,
11368 /* 0xb4 */ iemOp_mov_AH_Ib, iemOp_CH_Ib, iemOp_DH_Ib, iemOp_BH_Ib,
11369 /* 0xb8 */ iemOp_eAX_Iv, iemOp_eCX_Iv, iemOp_eDX_Iv, iemOp_eBX_Iv,
11370 /* 0xbc */ iemOp_eSP_Iv, iemOp_eBP_Iv, iemOp_eSI_Iv, iemOp_eDI_Iv,
11371 /* 0xc0 */ iemOp_Grp2_Eb_Ib, iemOp_Grp2_Ev_Ib, iemOp_retn_Iw, iemOp_retn,
11372 /* 0xc4 */ iemOp_les_Gv_Mp, iemOp_lds_Gv_Mp, iemOp_Grp11_Eb_Ib, iemOp_Grp11_Ev_Iz,
11373 /* 0xc8 */ iemOp_enter_Iw_Ib, iemOp_leave, iemOp_retf_Iw, iemOp_retf,
11374 /* 0xcc */ iemOp_int_3, iemOp_int_Ib, iemOp_into, iemOp_iret,
11375 /* 0xd0 */ iemOp_Grp2_Eb_1, iemOp_Grp2_Ev_1, iemOp_Grp2_Eb_CL, iemOp_Grp2_Ev_CL,
11376 /* 0xd4 */ iemOp_aam_Ib, iemOp_aad_Ib, iemOp_Invalid, iemOp_xlat,
11377 /* 0xd8 */ iemOp_EscF0, iemOp_EscF1, iemOp_EscF2, iemOp_EscF3,
11378 /* 0xdc */ iemOp_EscF4, iemOp_EscF5, iemOp_EscF6, iemOp_EscF7,
11379 /* 0xe0 */ iemOp_loopne_Jb, iemOp_loope_Jb, iemOp_loop_Jb, iemOp_jecxz_Jb,
11380 /* 0xe4 */ iemOp_in_AL_Ib, iemOp_in_eAX_Ib, iemOp_out_Ib_AL, iemOp_out_Ib_eAX,
11381 /* 0xe8 */ iemOp_call_Jv, iemOp_jmp_Jv, iemOp_jmp_Ap, iemOp_jmp_Jb,
11382 /* 0xec */ iemOp_in_AL_DX, iemOp_eAX_DX, iemOp_out_DX_AL, iemOp_out_DX_eAX,
11383 /* 0xf0 */ iemOp_lock, iemOp_Invalid, iemOp_repne, iemOp_repe,
11384 /* 0xf4 */ iemOp_hlt, iemOp_cmc, iemOp_Grp3_Eb, iemOp_Grp3_Ev,
11385 /* 0xf8 */ iemOp_clc, iemOp_stc, iemOp_cli, iemOp_sti,
11386 /* 0xfc */ iemOp_cld, iemOp_std, iemOp_Grp4, iemOp_Grp5,
11387};
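/* A minimal dispatch sketch (illustrative only; the real decoder loop lives
   in IEMAll.cpp, and prefix bytes such as 0x66 and 0xf0 are table entries
   themselves which update decoder state and re-enter the table):

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
 */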
11388
11389
11390/** @} */
11391