VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h@ 36944

Last change on this file since 36944 was 36860, checked in by vboxsync, 14 years ago

IEM: rdtsc, mov DRx, ltr, lldt. cmovnle fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 385.2 KB
Line 
1/* $Id: IEMAllInstructions.cpp.h 36860 2011-04-27 17:31:21Z vboxsync $ */
2/** @file
3 * IEM - Instruction Decoding and Emulation.
4 */
5
6/*
7 * Copyright (C) 2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
/**
 * Common worker for instructions like ADD, AND, OR, ++ with a byte
 * memory/register as the destination (Eb,Gb form).
 *
 * Decodes the ModR/M byte itself and emits the IEM_MC microcode for both
 * the register-destination and the memory-destination encodings.
 *
 * @param   pImpl   Pointer to the instruction implementation (assembly),
 *                  providing the normal and locked worker callbacks.
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_r8, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: a LOCK prefix is invalid (#UD). */
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         * Note! We're putting the eflags on the stack here so we can commit them
         *       after the memory.
         */
        /* pfnLockedU8 is NULL for instructions that never write the destination
           (CMP, TEST), so map the operand read-only in that case. */
        uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R; /* CMP,TEST */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_EFLAGS(EFlags);
        /* Dispatch to the locked worker when a LOCK prefix was decoded. */
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
80
81
/**
 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with
 * memory/register as the destination (Ev,Gv form).
 *
 * Decodes the ModR/M byte and emits one IEM_MC block per effective operand
 * size for both the register-destination and memory-destination encodings.
 *
 * @param   pImpl   Pointer to the instruction implementation (assembly),
 *                  providing normal and locked workers for each operand size.
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: a LOCK prefix is invalid (#UD). */
        IEMOP_HLP_NO_LOCK_PREFIX();

        /* NOTE(review): no default case here; relies on enmEffOpSize always
           being one of the three listed modes — confirm against the decoder. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t,   u16Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t,   u32Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t,   u64Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're accessing memory.
         * Note! We're putting the eflags on the stack here so we can commit them
         *       after the memory.
         */
        /* pfnLockedU8 is used as a "has locked variants?" proxy for all sizes:
           it is NULL exactly for CMP/TEST, which never write the destination,
           so the operand is mapped read-only for those. */
        uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R /* CMP,TEST */;
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t,   u16Src,  1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t,   u32Src,  1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t,   u64Src,  1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
227
228
/**
 * Common worker for byte instructions like ADD, AND, OR, ++ with a register as
 * the destination (Gb,Eb form).
 *
 * The destination is always a register, so no locked variants are needed and
 * the memory operand (if any) is only read.
 *
 * @param   pImpl   Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_r8_rm, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        /* Source is r/m, destination is the reg field — mirrored vs rm_r8. */
        IEM_MC_FETCH_GREG_U8(u8Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         */
        IEM_MC_BEGIN(3, 1);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_MEM_U8(u8Src, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
281
282
/**
 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with a
 * register as the destination (Gv,Ev form).
 *
 * The destination is always a register, so the memory operand (if any) is only
 * read and no locked variants are required.
 *
 * @param   pImpl   Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* NOTE(review): no default case — relies on enmEffOpSize being one of
           the three listed modes; confirm against the decoder. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t,   u16Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U16(u16Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t,   u32Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U32(u32Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t,   u64Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U64(u64Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're accessing memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t,   u16Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t,   u32Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t,   u64Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
408
409
/**
 * Common worker for instructions like ADD, AND, OR, ++ working on AL with
 * a byte immediate (AL,Ib form).
 *
 * @param   pImpl   Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_AL_Ib, PCIEMOPBINSIZES, pImpl)
{
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG(uint8_t *,       pu8Dst,             0);
    IEM_MC_ARG_CONST(uint8_t,   u8Src,/*=*/ u8Imm,  1);
    IEM_MC_ARG(uint32_t *,      pEFlags,            2);

    IEM_MC_REF_GREG_U8(pu8Dst, X86_GREG_xAX);
    IEM_MC_REF_EFLAGS(pEFlags);
    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
434
435
/**
 * Common worker for instructions like ADD, AND, OR, ++ working on
 * AX/EAX/RAX with a word/dword immediate (rAX,Iz form).
 *
 * In 64-bit mode the immediate is a dword sign-extended to 64 bits, per the
 * Iz operand encoding.
 *
 * @param   pImpl   Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rAX_Iz, PCIEMOPBINSIZES, pImpl)
{
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint16_t *,      pu16Dst,                0);
            IEM_MC_ARG_CONST(uint16_t,  u16Src,/*=*/ u16Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint32_t *,      pu32Dst,                0);
            IEM_MC_ARG_CONST(uint32_t,  u32Src,/*=*/ u32Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            /* Dword immediate, sign-extended to 64 bits (Iz). */
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(pIemCpu, &u64Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint64_t *,      pu64Dst,                0);
            IEM_MC_ARG_CONST(uint64_t,  u64Src,/*=*/ u64Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U64(pu64Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
506
507
/** Opcodes 0xf1, 0xd6 — undocumented encodings (presumably INT1/ICEBP and
 *  SALC on real hardware; IEM simply raises \#UD for them — confirm if these
 *  ever need emulating). */
FNIEMOP_DEF(iemOp_Invalid)
{
    IEMOP_MNEMONIC("Invalid");
    return IEMOP_RAISE_INVALID_OPCODE();
}
514
515
516
517/** @name ..... opcodes.
518 *
519 * @{
520 */
521
522/** @} */
523
524
525/** @name Two byte opcodes (first byte 0x0f).
526 *
527 * @{
528 */
529
/** Opcode 0x0f 0x00 /0 — SLDT. Not implemented yet. */
FNIEMOP_STUB_1(iemOp_Grp6_sldt, uint8_t, bRm);


/** Opcode 0x0f 0x00 /1 — STR. Not implemented yet. */
FNIEMOP_STUB_1(iemOp_Grp6_str, uint8_t, bRm);
536
537
/** Opcode 0x0f 0x00 /2 — LLDT. */
FNIEMOP_DEF_1(iemOp_Grp6_lldt, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register operand: selector comes from a GPR. */
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
    }
    else
    {
        /* Memory operand. NOTE(review): the CPL-0 check is emitted only on
           this path, not the register path — presumably iemCImpl_lldt also
           checks privilege; confirm the asymmetry is intentional. */
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
        IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
563
564
/** Opcode 0x0f 0x00 /3 — LTR. */
FNIEMOP_DEF_1(iemOp_Grp6_ltr, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register operand: selector comes from a GPR. */
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
        IEM_MC_END();
    }
    else
    {
        /* Memory operand. NOTE(review): same path asymmetry as lldt — the
           CPL-0 check appears only here; confirm iemCImpl_ltr covers the
           register path. */
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
        IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
590
591
/** Opcode 0x0f 0x00 /4 — VERR. Not implemented yet. */
FNIEMOP_STUB_1(iemOp_Grp6_verr, uint8_t, bRm);


/** Opcode 0x0f 0x00 /5 — VERW. Not implemented yet. */
FNIEMOP_STUB_1(iemOp_Grp6_verw, uint8_t, bRm);
598
599
600/** Opcode 0x0f 0x00. */
601FNIEMOP_DEF(iemOp_Grp6)
602{
603 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
604 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
605 {
606 case 0: return FNIEMOP_CALL_1(iemOp_Grp6_sldt, bRm);
607 case 1: return FNIEMOP_CALL_1(iemOp_Grp6_str, bRm);
608 case 2: return FNIEMOP_CALL_1(iemOp_Grp6_lldt, bRm);
609 case 3: return FNIEMOP_CALL_1(iemOp_Grp6_ltr, bRm);
610 case 4: return FNIEMOP_CALL_1(iemOp_Grp6_verr, bRm);
611 case 5: return FNIEMOP_CALL_1(iemOp_Grp6_verw, bRm);
612 case 6: return IEMOP_RAISE_INVALID_OPCODE();
613 case 7: return IEMOP_RAISE_INVALID_OPCODE();
614 IEM_NOT_REACHED_DEFAULT_CASE_RET();
615 }
616
617}
618
619
/** Opcode 0x0f 0x01 /0 — SGDT. Not implemented yet; asserts in debug builds. */
FNIEMOP_DEF_1(iemOp_Grp7_sgdt, uint8_t, bRm)
{
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}
625
626
/** Opcode 0x0f 0x01 /0, mod=3 rm=1 — VMCALL. VMX not emulated; raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_vmcall)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /0, mod=3 rm=2 — VMLAUNCH. VMX not emulated; raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_vmlaunch)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /0, mod=3 rm=3 — VMRESUME. VMX not emulated; raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_vmresume)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /0, mod=3 rm=4 — VMXOFF. VMX not emulated; raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_vmxoff)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}
657
658
/** Opcode 0x0f 0x01 /1 — SIDT. Not implemented yet; asserts in debug builds. */
FNIEMOP_DEF_1(iemOp_Grp7_sidt, uint8_t, bRm)
{
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /1, mod=3 rm=0 — MONITOR. Not implemented yet. */
FNIEMOP_DEF(iemOp_Grp7_monitor)
{
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /1, mod=3 rm=1 — MWAIT. Not implemented yet. */
FNIEMOP_DEF(iemOp_Grp7_mwait)
{
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}
678
679
/** Opcode 0x0f 0x01 /2 — LGDT.
 * Only reached for memory forms: the group-7 dispatcher routes mod=3
 * encodings (xgetbv/xsetbv) elsewhere. In 64-bit mode the effective operand
 * size is forced to 64-bit regardless of prefixes. */
FNIEMOP_DEF_1(iemOp_Grp7_lgdt, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEMMODE enmEffOpSize = pIemCpu->enmCpuMode == IEMMODE_64BIT
                         ? IEMMODE_64BIT
                         : pIemCpu->enmEffOpSize;
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0);
    IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/enmEffOpSize, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
    IEM_MC_CALL_CIMPL_3(iemCImpl_lgdt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}
697
698
/** Opcode 0x0f 0x01 /2, mod=3 rm=0 — XGETBV. Not implemented; raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_xgetbv)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /2, mod=3 rm=1 — XSETBV. Not implemented; raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_xsetbv)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}
713
714
/** Opcode 0x0f 0x01 /3 — LIDT.
 * Only reached for memory forms (the dispatcher raises \#UD for mod=3).
 * Mirrors iemOp_Grp7_lgdt: 64-bit mode forces a 64-bit operand size. */
FNIEMOP_DEF_1(iemOp_Grp7_lidt, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEMMODE enmEffOpSize = pIemCpu->enmCpuMode == IEMMODE_64BIT
                         ? IEMMODE_64BIT
                         : pIemCpu->enmEffOpSize;
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0);
    IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/enmEffOpSize, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
    IEM_MC_CALL_CIMPL_3(iemCImpl_lidt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}
732
733
/** Opcode 0x0f 0x01 /4 — SMSW.
 * Stores the machine status word (CR0) into a register or memory.
 * Register destinations honour the operand size; memory destinations are
 * always 16-bit. */
FNIEMOP_DEF_1(iemOp_Grp7_smsw, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Tmp);
                IEM_MC_FETCH_CR0_U16(u16Tmp);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Tmp);
                IEM_MC_FETCH_CR0_U32(u32Tmp);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_FETCH_CR0_U64(u64Tmp);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Ignore operand size here, memory refs are always 16-bit. */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Tmp);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_CR0_U16(u16Tmp);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tmp);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
        return VINF_SUCCESS;
    }
}
786
787
/** Opcode 0x0f 0x01 /6 — LMSW. */
FNIEMOP_DEF_1(iemOp_Grp7_lmsw, uint8_t, bRm)
{
    /* The operand size is effectively ignored, all is 16-bit and only the
       lower 4 bits (PE, MP, EM, TS) are used. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register source. */
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Tmp, 0);
        IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
        IEM_MC_END();
    }
    else
    {
        /* Memory source. */
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Tmp, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
814
815
/** Opcode 0x0f 0x01 /7 — INVLPG. Not implemented yet; asserts in debug builds. */
FNIEMOP_DEF_1(iemOp_Grp7_invlpg, uint8_t, bRm)
{
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /7, mod=3 rm=0 — SWAPGS. Not implemented yet. */
FNIEMOP_DEF(iemOp_Grp7_swapgs)
{
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /7, mod=3 rm=1 — RDTSCP. Not implemented yet. */
FNIEMOP_DEF(iemOp_Grp7_rdtscp)
{
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}
835
836
/** Opcode 0x0f 0x01 — group 7.
 * Dispatches on the ModR/M /reg field; several /reg values additionally
 * distinguish memory forms (mod != 3) from register-encoded special
 * instructions (mod == 3, rm selects the instruction). */
FNIEMOP_DEF(iemOp_Grp7)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: /* sgdt, or VMX instructions when mod == 3 */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_sgdt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 1: return FNIEMOP_CALL(iemOp_Grp7_vmcall);
                case 2: return FNIEMOP_CALL(iemOp_Grp7_vmlaunch);
                case 3: return FNIEMOP_CALL(iemOp_Grp7_vmresume);
                case 4: return FNIEMOP_CALL(iemOp_Grp7_vmxoff);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 1: /* sidt, or monitor/mwait when mod == 3 */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_sidt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_monitor);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_mwait);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 2: /* lgdt, or xgetbv/xsetbv when mod == 3 */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_lgdt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_xgetbv);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_xsetbv);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 3: /* lidt; register form is invalid */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_lidt, bRm);
            return IEMOP_RAISE_INVALID_OPCODE();

        case 4: /* smsw handles both register and memory forms itself */
            return FNIEMOP_CALL_1(iemOp_Grp7_smsw, bRm);

        case 5: /* unused encoding */
            return IEMOP_RAISE_INVALID_OPCODE();

        case 6: /* lmsw handles both register and memory forms itself */
            return FNIEMOP_CALL_1(iemOp_Grp7_lmsw, bRm);

        case 7: /* invlpg, or swapgs/rdtscp when mod == 3 */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_invlpg, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_swapgs);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_rdtscp);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
902
903
/** Opcode 0x0f 0x02 — LAR. Not implemented yet. */
FNIEMOP_STUB(iemOp_lar_Gv_Ew);
/** Opcode 0x0f 0x03 — LSL. Not implemented yet. */
FNIEMOP_STUB(iemOp_lsl_Gv_Ew);
/** Opcode 0x0f 0x05 — SYSCALL. (Was mislabelled 0x04; 0f 04 is an invalid
 *  encoding, SYSCALL is 0f 05.) Not implemented yet. */
FNIEMOP_STUB(iemOp_syscall);
910
911
/** Opcode 0x0f 0x06 — CLTS. (Was mislabelled 0x05; CLTS encodes as 0f 06.)
 *  Clears the TS flag in CR0; deferred to the C implementation. */
FNIEMOP_DEF(iemOp_clts)
{
    IEMOP_MNEMONIC("clts");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clts);
}
919
920
/** Opcode 0x0f 0x07 — SYSRET. (Was mislabelled 0x06.) Not implemented yet. */
FNIEMOP_STUB(iemOp_sysret);
/** Opcode 0x0f 0x08 — INVD. */
FNIEMOP_STUB(iemOp_invd);
/** Opcode 0x0f 0x09 — WBINVD. */
FNIEMOP_STUB(iemOp_wbinvd);
/** Opcode 0x0f 0x0b — UD2. */
FNIEMOP_STUB(iemOp_ud2);
/** Opcode 0x0f 0x0d — NOP Ev / PREFETCH (AMD). */
FNIEMOP_STUB(iemOp_nop_Ev_prefetch);
/** Opcode 0x0f 0x0e — FEMMS (AMD 3DNow!). */
FNIEMOP_STUB(iemOp_femms);
/** Opcode 0x0f 0x0f — 3DNow! escape. */
FNIEMOP_STUB(iemOp_3Dnow);
/** Opcode 0x0f 0x10 — movups/movupd/movss/movsd (load forms). */
FNIEMOP_STUB(iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd);
/** Opcode 0x0f 0x11 — movups/movupd/movss/movsd (store forms). */
FNIEMOP_STUB(iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd);
/** Opcode 0x0f 0x12. */
FNIEMOP_STUB(iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq);
/** Opcode 0x0f 0x13. */
FNIEMOP_STUB(iemOp_movlps_Mq_Vq__movlpd_Mq_Vq);
/** Opcode 0x0f 0x14. */
FNIEMOP_STUB(iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq);
/** Opcode 0x0f 0x15. */
FNIEMOP_STUB(iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq);
/** Opcode 0x0f 0x16. */
FNIEMOP_STUB(iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq);
/** Opcode 0x0f 0x17. */
FNIEMOP_STUB(iemOp_movhps_Mq_Vq__movhpd_Mq_Vq);
/** Opcode 0x0f 0x18 — prefetch group 16. */
FNIEMOP_STUB(iemOp_prefetch_Grp16);
953
954
/** Opcode 0x0f 0x20 — MOV Rd,Cd (read control register). */
FNIEMOP_DEF(iemOp_mov_Rd_Cd)
{
    /* mod is ignored, as is operand size overrides. */
    IEMOP_MNEMONIC("mov Rd,Cd");
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
    else
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;

    /** @todo Verify that the invalid lock sequence exception (\#UD) is raised
     *        before the privilege level violation (\#GP). */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
    {
        /* The lock prefix can be used to encode CR8 accesses on some CPUs
           (AMD's CR8 alternative-encoding feature, CPUID CR8L bit). */
        if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
            return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        iCrReg |= 8;
    }
    /* Only CR0, CR2, CR3, CR4 and CR8 exist; anything else is #UD. */
    switch (iCrReg)
    {
        case 0: case 2: case 3: case 4: case 8:
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }

    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Cd, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB, iCrReg);
}
986
987
/** Opcode 0x0f 0x21 — MOV Rd,Dd (read debug register). */
FNIEMOP_DEF(iemOp_mov_Rd_Dd)
{
    IEMOP_MNEMONIC("mov Rd,Dd");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* REX.R would select DR8-DR15, which do not exist -> #UD. */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
        return IEMOP_RAISE_INVALID_OPCODE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Dd,
                                   (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB,
                                   ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK));
}
1000
1001
/** Opcode 0x0f 0x22 — MOV Cd,Rd (write control register). */
FNIEMOP_DEF(iemOp_mov_Cd_Rd)
{
    /* mod is ignored, as is operand size overrides. */
    IEMOP_MNEMONIC("mov Cd,Rd");
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
    else
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;

    /** @todo Verify that the invalid lock sequence exception (\#UD) is raised
     *        before the privilege level violation (\#GP). */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
    {
        /* The lock prefix can be used to encode CR8 accesses on some CPUs
           (AMD's CR8 alternative-encoding feature, CPUID CR8L bit). */
        if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
            return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        iCrReg |= 8;
    }
    /* Only CR0, CR2, CR3, CR4 and CR8 exist; anything else is #UD. */
    switch (iCrReg)
    {
        case 0: case 2: case 3: case 4: case 8:
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }

    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Cd_Rd, iCrReg, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}
1033
1034
/** Opcode 0x0f 0x23 — MOV Dd,Rd (write debug register). */
FNIEMOP_DEF(iemOp_mov_Dd_Rd)
{
    IEMOP_MNEMONIC("mov Dd,Rd");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* REX.R would select DR8-DR15, which do not exist -> #UD. */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
        return IEMOP_RAISE_INVALID_OPCODE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Dd_Rd,
                                   ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK),
                                   (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}
1047
1048
/** Opcode 0x0f 0x24. */
FNIEMOP_DEF(iemOp_mov_Rd_Td)
{
    /* Test register moves are not supported; always #UD. */
    IEMOP_MNEMONIC("mov Rd,Td");
/** @todo Is the invalid opcode raise before parsing any R/M byte? */
    return IEMOP_RAISE_INVALID_OPCODE();
}
1056
1057
1058
/** Opcode 0x0f 0x26. */
FNIEMOP_DEF(iemOp_mov_Td_Rd)
{
    /* Test register moves are not supported; always #UD. */
    IEMOP_MNEMONIC("mov Td,Rd");
    return IEMOP_RAISE_INVALID_OPCODE();
}
1065
1066
/* Not implemented yet - decoding stubs. */
/** Opcode 0x0f 0x28. */
FNIEMOP_STUB(iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd);
/** Opcode 0x0f 0x29. */
FNIEMOP_STUB(iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd);
/** Opcode 0x0f 0x2a. */
FNIEMOP_STUB(iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey);
/** Opcode 0x0f 0x2b. */
FNIEMOP_STUB(iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd);
/** Opcode 0x0f 0x2c. */
FNIEMOP_STUB(iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd);
/** Opcode 0x0f 0x2d. */
FNIEMOP_STUB(iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd);
/** Opcode 0x0f 0x2e. */
FNIEMOP_STUB(iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd);
/** Opcode 0x0f 0x2f. */
FNIEMOP_STUB(iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd);
/** Opcode 0x0f 0x30. */
FNIEMOP_STUB(iemOp_wrmsr);
1085
1086
/** Opcode 0x0f 0x31. */
FNIEMOP_DEF(iemOp_rdtsc)
{
    IEMOP_MNEMONIC("rdtsc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* No operands to decode; all the work happens in the C implementation. */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdtsc);
}
1094
1095
/** Opcode 0x0f 0x32. */
FNIEMOP_STUB(iemOp_rdmsr);
/** Opcode 0x0f 0x33. */
FNIEMOP_STUB(iemOp_rdpmc);
/** Opcode 0x0f 0x34. */
FNIEMOP_STUB(iemOp_sysenter);
/** Opcode 0x0f 0x35. */
FNIEMOP_STUB(iemOp_sysexit);
/** Opcode 0x0f 0x37. */
FNIEMOP_STUB(iemOp_getsec);
/** Opcode 0x0f 0x38. */
FNIEMOP_STUB(iemOp_3byte_Esc_A4);
/** Opcode 0x0f 0x39. */
FNIEMOP_STUB(iemOp_3byte_Esc_A5);
/** Opcode 0x0f 0x3c (?). */
FNIEMOP_STUB(iemOp_movnti_Gv_Ev);
1112
/**
 * Implements a conditional move (CMOVcc Gv,Ev).
 *
 * The destination is always a general register (reg field); the source is
 * either a register or memory (r/m field).  Note that in the 32-bit forms the
 * high dword of the 64-bit destination is cleared even when the condition is
 * false (hence the explicit IEM_MC_ELSE arms), while a memory source is
 * always fetched - and may thus fault - regardless of the condition.
 *
 * Wish there was an obvious way to do this where we could share and reduce
 * code bloat.
 *
 * @param a_Cnd The conditional "microcode" operation.
 */
#define CMOV_X(a_Cnd) \
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm); \
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) \
    { \
        /* Register source. */ \
        switch (pIemCpu->enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint16_t, u16Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint32_t, u32Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
                } IEM_MC_ELSE() { \
                    /* Condition false: still clear bits 63:32 of the destination. */ \
                    IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint64_t, u64Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } \
    else \
    { \
        /* Memory source - fetched unconditionally, so it may fault either way. */ \
        switch (pIemCpu->enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint16_t, u16Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
                IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint32_t, u32Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
                IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
                } IEM_MC_ELSE() { \
                    /* Condition false: still clear bits 63:32 of the destination. */ \
                    IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint64_t, u64Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } do {} while (0)
1213
1214
1215
/*
 * CMOVcc Gv,Ev - opcodes 0x0f 0x40 thru 0x0f 0x4f.
 * All bodies are generated by CMOV_X above; only the EFLAGS condition differs.
 */

/** Opcode 0x0f 0x40. */
FNIEMOP_DEF(iemOp_cmovo_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovo Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF));
}


/** Opcode 0x0f 0x41. */
FNIEMOP_DEF(iemOp_cmovno_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovno Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_OF));
}


/** Opcode 0x0f 0x42. */
FNIEMOP_DEF(iemOp_cmovc_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovc Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF));
}


/** Opcode 0x0f 0x43. */
FNIEMOP_DEF(iemOp_cmovnc_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnc Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_CF));
}


/** Opcode 0x0f 0x44. */
FNIEMOP_DEF(iemOp_cmove_Gv_Ev)
{
    IEMOP_MNEMONIC("cmove Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF));
}


/** Opcode 0x0f 0x45. */
FNIEMOP_DEF(iemOp_cmovne_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovne Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF));
}


/** Opcode 0x0f 0x46. */
FNIEMOP_DEF(iemOp_cmovbe_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovbe Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF));
}


/** Opcode 0x0f 0x47. */
FNIEMOP_DEF(iemOp_cmovnbe_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnbe Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_NO_BITS_SET(X86_EFL_CF | X86_EFL_ZF));
}


/** Opcode 0x0f 0x48. */
FNIEMOP_DEF(iemOp_cmovs_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovs Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF));
}


/** Opcode 0x0f 0x49. */
FNIEMOP_DEF(iemOp_cmovns_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovns Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_SF));
}


/** Opcode 0x0f 0x4a. */
FNIEMOP_DEF(iemOp_cmovp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovp Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF));
}


/** Opcode 0x0f 0x4b. */
FNIEMOP_DEF(iemOp_cmovnp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnp Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_PF));
}


/** Opcode 0x0f 0x4c. */
FNIEMOP_DEF(iemOp_cmovl_Gv_Ev)
{
    /* Taken when SF != OF. */
    IEMOP_MNEMONIC("cmovl Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4d. */
FNIEMOP_DEF(iemOp_cmovnl_Gv_Ev)
{
    /* Taken when SF == OF. */
    IEMOP_MNEMONIC("cmovnl Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BITS_EQ(X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4e. */
FNIEMOP_DEF(iemOp_cmovle_Gv_Ev)
{
    /* Taken when ZF is set or SF != OF. */
    IEMOP_MNEMONIC("cmovle Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4f. */
FNIEMOP_DEF(iemOp_cmovnle_Gv_Ev)
{
    /* Taken when ZF is clear and SF == OF. */
    IEMOP_MNEMONIC("cmovnle Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF));
}
1342
1343#undef CMOV_X
1344
/* MMX/SSE instructions 0x0f 0x50 thru 0x0f 0x7f - not implemented yet (stubs). */
/** Opcode 0x0f 0x50. */
FNIEMOP_STUB(iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd);
/** Opcode 0x0f 0x51. */
FNIEMOP_STUB(iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd);
/** Opcode 0x0f 0x52. */
FNIEMOP_STUB(iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss);
/** Opcode 0x0f 0x53. */
FNIEMOP_STUB(iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss);
/** Opcode 0x0f 0x54. */
FNIEMOP_STUB(iemOp_andps_Vps_Wps__andpd_Wpd_Vpd);
/** Opcode 0x0f 0x55. */
FNIEMOP_STUB(iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd);
/** Opcode 0x0f 0x56. */
FNIEMOP_STUB(iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd);
/** Opcode 0x0f 0x57. */
FNIEMOP_STUB(iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd);
/** Opcode 0x0f 0x58. */
FNIEMOP_STUB(iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd);
/** Opcode 0x0f 0x59. */
FNIEMOP_STUB(iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd);
/** Opcode 0x0f 0x5a. */
FNIEMOP_STUB(iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd);
/** Opcode 0x0f 0x5b. */
FNIEMOP_STUB(iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps);
/** Opcode 0x0f 0x5c. */
FNIEMOP_STUB(iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd);
/** Opcode 0x0f 0x5d. */
FNIEMOP_STUB(iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd);
/** Opcode 0x0f 0x5e. */
FNIEMOP_STUB(iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd);
/** Opcode 0x0f 0x5f. */
FNIEMOP_STUB(iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd);
/** Opcode 0x0f 0x60. */
FNIEMOP_STUB(iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq);
/** Opcode 0x0f 0x61. */
FNIEMOP_STUB(iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq);
/** Opcode 0x0f 0x62. */
FNIEMOP_STUB(iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq);
/** Opcode 0x0f 0x63. */
FNIEMOP_STUB(iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq);
/** Opcode 0x0f 0x64. */
FNIEMOP_STUB(iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq);
/** Opcode 0x0f 0x65. */
FNIEMOP_STUB(iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq);
/** Opcode 0x0f 0x66. */
FNIEMOP_STUB(iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq);
/** Opcode 0x0f 0x67. */
FNIEMOP_STUB(iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq);
/** Opcode 0x0f 0x68. */
FNIEMOP_STUB(iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq);
/** Opcode 0x0f 0x69. */
FNIEMOP_STUB(iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq);
/** Opcode 0x0f 0x6a. */
FNIEMOP_STUB(iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq);
/** Opcode 0x0f 0x6b. */
FNIEMOP_STUB(iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq);
/** Opcode 0x0f 0x6c. */
FNIEMOP_STUB(iemOp_punpcklqdq_Vdq_Wdq);
/** Opcode 0x0f 0x6d. */
FNIEMOP_STUB(iemOp_punpckhqdq_Vdq_Wdq);
/** Opcode 0x0f 0x6e. */
FNIEMOP_STUB(iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey);
/** Opcode 0x0f 0x6f. */
FNIEMOP_STUB(iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq);
/** Opcode 0x0f 0x70. */
FNIEMOP_STUB(iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib);
/** Opcode 0x0f 0x71. */
FNIEMOP_STUB(iemOp_Grp12);
/** Opcode 0x0f 0x72. */
FNIEMOP_STUB(iemOp_Grp13);
/** Opcode 0x0f 0x73. */
FNIEMOP_STUB(iemOp_Grp14);
/** Opcode 0x0f 0x74. */
FNIEMOP_STUB(iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq);
/** Opcode 0x0f 0x75. */
FNIEMOP_STUB(iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq);
/** Opcode 0x0f 0x76. */
FNIEMOP_STUB(iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq);
/** Opcode 0x0f 0x77. */
FNIEMOP_STUB(iemOp_emms);
/** Opcode 0x0f 0x78. */
FNIEMOP_STUB(iemOp_vmread);
/** Opcode 0x0f 0x79. */
FNIEMOP_STUB(iemOp_vmwrite);
/** Opcode 0x0f 0x7c. */
FNIEMOP_STUB(iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps);
/** Opcode 0x0f 0x7d. */
FNIEMOP_STUB(iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps);
/** Opcode 0x0f 0x7e. */
FNIEMOP_STUB(iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq);
/** Opcode 0x0f 0x7f. */
FNIEMOP_STUB(iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq);
1437
1438
/*
 * Jcc Jv - long-displacement conditional jumps, 0x0f 0x80 thru 0x0f 0x87.
 * Operand size defaults to 64-bit in long mode; the immediate is 16 or 32 bits
 * and sign-extended for the relative jump.
 */

/** Opcode 0x0f 0x80. */
FNIEMOP_DEF(iemOp_jo_Jv)
{
    IEMOP_MNEMONIC("jo Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_REL_JMP_S16((int16_t)u16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_REL_JMP_S32((int32_t)u32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x81. */
FNIEMOP_DEF(iemOp_jno_Jv)
{
    IEMOP_MNEMONIC("jno Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16((int16_t)u16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32((int32_t)u32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x82. */
FNIEMOP_DEF(iemOp_jc_Jv)
{
    IEMOP_MNEMONIC("jc/jb/jnae Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_REL_JMP_S16((int16_t)u16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_REL_JMP_S32((int32_t)u32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x83. */
FNIEMOP_DEF(iemOp_jnc_Jv)
{
    IEMOP_MNEMONIC("jnc/jnb/jae Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16((int16_t)u16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32((int32_t)u32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x84. */
FNIEMOP_DEF(iemOp_je_Jv)
{
    IEMOP_MNEMONIC("je/jz Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_REL_JMP_S16((int16_t)u16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_REL_JMP_S32((int32_t)u32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x85. */
FNIEMOP_DEF(iemOp_jne_Jv)
{
    IEMOP_MNEMONIC("jne/jnz Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16((int16_t)u16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32((int32_t)u32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x86. */
FNIEMOP_DEF(iemOp_jbe_Jv)
{
    IEMOP_MNEMONIC("jbe/jna Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_REL_JMP_S16((int16_t)u16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_REL_JMP_S32((int32_t)u32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x87. */
FNIEMOP_DEF(iemOp_jnbe_Jv)
{
    IEMOP_MNEMONIC("jnbe/ja Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16((int16_t)u16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32((int32_t)u32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1717
1718
/** Opcode 0x0f 0x88. */
FNIEMOP_DEF(iemOp_js_Jv)
{
    IEMOP_MNEMONIC("js Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_REL_JMP_S16((int16_t)u16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_REL_JMP_S32((int32_t)u32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x89. */
FNIEMOP_DEF(iemOp_jns_Jv)
{
    IEMOP_MNEMONIC("jns Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16((int16_t)u16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32((int32_t)u32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x8a. */
FNIEMOP_DEF(iemOp_jp_Jv)
{
    IEMOP_MNEMONIC("jp Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_REL_JMP_S16((int16_t)u16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_REL_JMP_S32((int32_t)u32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1822
1823
1824/** Opcode 0x0f 0x8b. */
1825FNIEMOP_DEF(iemOp_jnp_Jv)
1826{
1827 IEMOP_MNEMONIC("jo Jv");
1828 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1829 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1830 {
1831 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
1832 IEMOP_HLP_NO_LOCK_PREFIX();
1833
1834 IEM_MC_BEGIN(0, 0);
1835 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
1836 IEM_MC_ADVANCE_RIP();
1837 } IEM_MC_ELSE() {
1838 IEM_MC_REL_JMP_S16((int16_t)u16Imm);
1839 } IEM_MC_ENDIF();
1840 IEM_MC_END();
1841 }
1842 else
1843 {
1844 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
1845 IEMOP_HLP_NO_LOCK_PREFIX();
1846
1847 IEM_MC_BEGIN(0, 0);
1848 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
1849 IEM_MC_ADVANCE_RIP();
1850 } IEM_MC_ELSE() {
1851 IEM_MC_REL_JMP_S32((int32_t)u32Imm);
1852 } IEM_MC_ENDIF();
1853 IEM_MC_END();
1854 }
1855 return VINF_SUCCESS;
1856}
1857
1858
/** Opcode 0x0f 0x8c. */
FNIEMOP_DEF(iemOp_jl_Jv)
{
    /* Taken when SF != OF. */
    IEMOP_MNEMONIC("jl/jnge Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S16((int16_t)u16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S32((int32_t)u32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x8d. */
FNIEMOP_DEF(iemOp_jnl_Jv)
{
    /* Taken when SF == OF. */
    IEMOP_MNEMONIC("jnl/jge Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16((int16_t)u16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32((int32_t)u32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x8e. */
FNIEMOP_DEF(iemOp_jle_Jv)
{
    /* Taken when ZF is set or SF != OF. */
    IEMOP_MNEMONIC("jle/jng Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S16((int16_t)u16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S32((int32_t)u32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x8f. */
FNIEMOP_DEF(iemOp_jnle_Jv)
{
    /* Taken when ZF is clear and SF == OF. */
    IEMOP_MNEMONIC("jnle/jg Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16((int16_t)u16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32((int32_t)u32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1997
1998
1999/** Opcode 0x0f 0x90. */
2000FNIEMOP_DEF(iemOp_seto_Eb)
2001{
2002 IEMOP_MNEMONIC("seto Eb");
2003 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
2004 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2005
2006 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2007 * any way. AMD says it's "unused", whatever that means. We're
2008 * ignoring for now. */
2009 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2010 {
2011 /* register target */
2012 IEM_MC_BEGIN(0, 0);
2013 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
2014 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2015 } IEM_MC_ELSE() {
2016 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2017 } IEM_MC_ENDIF();
2018 IEM_MC_ADVANCE_RIP();
2019 IEM_MC_END();
2020 }
2021 else
2022 {
2023 /* memory target */
2024 IEM_MC_BEGIN(0, 1);
2025 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2026 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2027 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
2028 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2029 } IEM_MC_ELSE() {
2030 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2031 } IEM_MC_ENDIF();
2032 IEM_MC_ADVANCE_RIP();
2033 IEM_MC_END();
2034 }
2035 return VINF_SUCCESS;
2036}
2037
2038
2039/** Opcode 0x0f 0x91. */
2040FNIEMOP_DEF(iemOp_setno_Eb)
2041{
2042 IEMOP_MNEMONIC("setno Eb");
2043 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
2044 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2045
2046 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2047 * any way. AMD says it's "unused", whatever that means. We're
2048 * ignoring for now. */
2049 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2050 {
2051 /* register target */
2052 IEM_MC_BEGIN(0, 0);
2053 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
2054 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2055 } IEM_MC_ELSE() {
2056 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2057 } IEM_MC_ENDIF();
2058 IEM_MC_ADVANCE_RIP();
2059 IEM_MC_END();
2060 }
2061 else
2062 {
2063 /* memory target */
2064 IEM_MC_BEGIN(0, 1);
2065 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2066 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2067 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
2068 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2069 } IEM_MC_ELSE() {
2070 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2071 } IEM_MC_ENDIF();
2072 IEM_MC_ADVANCE_RIP();
2073 IEM_MC_END();
2074 }
2075 return VINF_SUCCESS;
2076}
2077
2078
2079/** Opcode 0x0f 0x92. */
2080FNIEMOP_DEF(iemOp_setc_Eb)
2081{
2082 IEMOP_MNEMONIC("setc Eb");
2083 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
2084 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2085
2086 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2087 * any way. AMD says it's "unused", whatever that means. We're
2088 * ignoring for now. */
2089 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2090 {
2091 /* register target */
2092 IEM_MC_BEGIN(0, 0);
2093 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
2094 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2095 } IEM_MC_ELSE() {
2096 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2097 } IEM_MC_ENDIF();
2098 IEM_MC_ADVANCE_RIP();
2099 IEM_MC_END();
2100 }
2101 else
2102 {
2103 /* memory target */
2104 IEM_MC_BEGIN(0, 1);
2105 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2106 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2107 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
2108 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2109 } IEM_MC_ELSE() {
2110 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2111 } IEM_MC_ENDIF();
2112 IEM_MC_ADVANCE_RIP();
2113 IEM_MC_END();
2114 }
2115 return VINF_SUCCESS;
2116}
2117
2118
2119/** Opcode 0x0f 0x93. */
2120FNIEMOP_DEF(iemOp_setnc_Eb)
2121{
2122 IEMOP_MNEMONIC("setnc Eb");
2123 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
2124 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2125
2126 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2127 * any way. AMD says it's "unused", whatever that means. We're
2128 * ignoring for now. */
2129 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2130 {
2131 /* register target */
2132 IEM_MC_BEGIN(0, 0);
2133 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
2134 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2135 } IEM_MC_ELSE() {
2136 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2137 } IEM_MC_ENDIF();
2138 IEM_MC_ADVANCE_RIP();
2139 IEM_MC_END();
2140 }
2141 else
2142 {
2143 /* memory target */
2144 IEM_MC_BEGIN(0, 1);
2145 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2146 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2147 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
2148 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2149 } IEM_MC_ELSE() {
2150 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2151 } IEM_MC_ENDIF();
2152 IEM_MC_ADVANCE_RIP();
2153 IEM_MC_END();
2154 }
2155 return VINF_SUCCESS;
2156}
2157
2158
/**
 * Opcode 0x0f 0x94 - SETE/SETZ Eb.
 *
 * Stores 1 in the byte operand (register or memory) if ZF is set,
 * otherwise stores 0.
 */
FNIEMOP_DEF(iemOp_sete_Eb)
{
    IEMOP_MNEMONIC("sete Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2197
2198
/**
 * Opcode 0x0f 0x95 - SETNE/SETNZ Eb.
 *
 * Stores 1 in the byte operand (register or memory) if ZF is clear,
 * otherwise stores 0.
 */
FNIEMOP_DEF(iemOp_setne_Eb)
{
    IEMOP_MNEMONIC("setne Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2237
2238
/**
 * Opcode 0x0f 0x96 - SETBE/SETNA Eb.
 *
 * Stores 1 in the byte operand (register or memory) if CF or ZF is set
 * (unsigned below-or-equal), otherwise stores 0.
 */
FNIEMOP_DEF(iemOp_setbe_Eb)
{
    IEMOP_MNEMONIC("setbe Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2277
2278
/**
 * Opcode 0x0f 0x97 - SETNBE/SETA Eb.
 *
 * Stores 1 in the byte operand (register or memory) if both CF and ZF are
 * clear (unsigned above), otherwise stores 0.
 */
FNIEMOP_DEF(iemOp_setnbe_Eb)
{
    IEMOP_MNEMONIC("setnbe Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2317
2318
/**
 * Opcode 0x0f 0x98 - SETS Eb.
 *
 * Stores 1 in the byte operand (register or memory) if SF is set,
 * otherwise stores 0.
 */
FNIEMOP_DEF(iemOp_sets_Eb)
{
    IEMOP_MNEMONIC("sets Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2357
2358
/**
 * Opcode 0x0f 0x99 - SETNS Eb.
 *
 * Stores 1 in the byte operand (register or memory) if SF is clear,
 * otherwise stores 0.
 */
FNIEMOP_DEF(iemOp_setns_Eb)
{
    IEMOP_MNEMONIC("setns Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2397
2398
2399/** Opcode 0x0f 0x9a. */
2400FNIEMOP_DEF(iemOp_setp_Eb)
2401{
2402 IEMOP_MNEMONIC("setnp Eb");
2403 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
2404 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2405
2406 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2407 * any way. AMD says it's "unused", whatever that means. We're
2408 * ignoring for now. */
2409 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2410 {
2411 /* register target */
2412 IEM_MC_BEGIN(0, 0);
2413 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2414 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2415 } IEM_MC_ELSE() {
2416 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2417 } IEM_MC_ENDIF();
2418 IEM_MC_ADVANCE_RIP();
2419 IEM_MC_END();
2420 }
2421 else
2422 {
2423 /* memory target */
2424 IEM_MC_BEGIN(0, 1);
2425 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2426 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2427 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2428 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2429 } IEM_MC_ELSE() {
2430 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2431 } IEM_MC_ENDIF();
2432 IEM_MC_ADVANCE_RIP();
2433 IEM_MC_END();
2434 }
2435 return VINF_SUCCESS;
2436}
2437
2438
/**
 * Opcode 0x0f 0x9b - SETNP/SETPO Eb.
 *
 * Stores 1 in the byte operand (register or memory) if PF is clear,
 * otherwise stores 0.
 */
FNIEMOP_DEF(iemOp_setnp_Eb)
{
    IEMOP_MNEMONIC("setnp Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2477
2478
/**
 * Opcode 0x0f 0x9c - SETL/SETNGE Eb.
 *
 * Stores 1 in the byte operand (register or memory) if SF != OF
 * (signed less-than), otherwise stores 0.
 */
FNIEMOP_DEF(iemOp_setl_Eb)
{
    IEMOP_MNEMONIC("setl Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2517
2518
/**
 * Opcode 0x0f 0x9d - SETNL/SETGE Eb.
 *
 * Stores 1 in the byte operand (register or memory) if SF == OF
 * (signed greater-or-equal), otherwise stores 0.
 */
FNIEMOP_DEF(iemOp_setnl_Eb)
{
    IEMOP_MNEMONIC("setnl Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2557
2558
/**
 * Opcode 0x0f 0x9e - SETLE/SETNG Eb.
 *
 * Stores 1 in the byte operand (register or memory) if ZF is set or
 * SF != OF (signed less-or-equal), otherwise stores 0.
 */
FNIEMOP_DEF(iemOp_setle_Eb)
{
    IEMOP_MNEMONIC("setle Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2597
2598
/**
 * Opcode 0x0f 0x9f - SETNLE/SETG Eb.
 *
 * Stores 1 in the byte operand (register or memory) if ZF is clear and
 * SF == OF (signed greater-than), otherwise stores 0.
 */
FNIEMOP_DEF(iemOp_setnle_Eb)
{
    IEMOP_MNEMONIC("setnle Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
2637
2638
/**
 * Common 'push segment-register' helper.
 *
 * Pushes the value of the given segment register using the current
 * effective operand size; 32 and 64-bit pushes zero extend the 16-bit
 * selector value.
 *
 * @param   iReg    The segment register to push (X86_SREG_XXX).
 *
 * @remarks ES/CS/SS/DS (iReg < X86_SREG_FS) are rejected in 64-bit mode
 *          (IEMOP_HLP_NO_64BIT); FS/GS get the default 64-bit operand
 *          size there (IEMOP_HLP_DEFAULT_64BIT_OP_SIZE).
 */
FNIEMOP_DEF_1(iemOpCommonPushSReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (iReg < X86_SREG_FS)
        IEMOP_HLP_NO_64BIT();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint16_t, u16Value);
            IEM_MC_FETCH_SREG_U16(u16Value, iReg);
            IEM_MC_PUSH_U16(u16Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_SREG_ZX_U32(u32Value, iReg);
            IEM_MC_PUSH_U32(u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_SREG_ZX_U64(u64Value, iReg);
            IEM_MC_PUSH_U64(u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
    }

    return VINF_SUCCESS;
}
2681
2682
/**
 * Opcode 0x0f 0xa0 - PUSH FS.
 *
 * @note The lock prefix check is also done by the common helper; the one
 *       here is redundant but harmless.
 */
FNIEMOP_DEF(iemOp_push_fs)
{
    IEMOP_MNEMONIC("push fs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_FS);
}
2690
2691
/**
 * Opcode 0x0f 0xa1 - POP FS.
 *
 * Defers to the C implementation since loading a segment register involves
 * descriptor table access and checks.
 */
FNIEMOP_DEF(iemOp_pop_fs)
{
    IEMOP_MNEMONIC("pop fs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_FS, pIemCpu->enmEffOpSize);
}
2699
2700
/**
 * Opcode 0x0f 0xa2 - CPUID.
 *
 * Defers to the C implementation (iemCImpl_cpuid).
 */
FNIEMOP_DEF(iemOp_cpuid)
{
    IEMOP_MNEMONIC("cpuid");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cpuid);
}
2708
2709
2710/**
2711 * Common worker for iemOp_bt_Ev_Gv, iemOp_btc_Ev_Gv, iemOp_btr_Ev_Gv and
2712 * iemOp_bts_Ev_Gv.
2713 */
2714FNIEMOP_DEF_1(iemOpCommonBit_Ev_Gv, PCIEMOPBINSIZES, pImpl)
2715{
2716 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
2717 IEMOP_HLP_NO_LOCK_PREFIX();
2718 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
2719
2720 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2721 {
2722 /* register destination. */
2723 IEMOP_HLP_NO_LOCK_PREFIX();
2724 switch (pIemCpu->enmEffOpSize)
2725 {
2726 case IEMMODE_16BIT:
2727 IEM_MC_BEGIN(3, 0);
2728 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
2729 IEM_MC_ARG(uint16_t, u16Src, 1);
2730 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2731
2732 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2733 IEM_MC_AND_LOCAL_U16(u16Src, 0xf);
2734 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2735 IEM_MC_REF_EFLAGS(pEFlags);
2736 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
2737
2738 IEM_MC_ADVANCE_RIP();
2739 IEM_MC_END();
2740 return VINF_SUCCESS;
2741
2742 case IEMMODE_32BIT:
2743 IEM_MC_BEGIN(3, 0);
2744 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
2745 IEM_MC_ARG(uint32_t, u32Src, 1);
2746 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2747
2748 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2749 IEM_MC_AND_LOCAL_U32(u32Src, 0x1f);
2750 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2751 IEM_MC_REF_EFLAGS(pEFlags);
2752 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
2753
2754 IEM_MC_ADVANCE_RIP();
2755 IEM_MC_END();
2756 return VINF_SUCCESS;
2757
2758 case IEMMODE_64BIT:
2759 IEM_MC_BEGIN(3, 0);
2760 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
2761 IEM_MC_ARG(uint64_t, u64Src, 1);
2762 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2763
2764 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2765 IEM_MC_AND_LOCAL_U64(u64Src, 0x3f);
2766 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2767 IEM_MC_REF_EFLAGS(pEFlags);
2768 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
2769
2770 IEM_MC_ADVANCE_RIP();
2771 IEM_MC_END();
2772 return VINF_SUCCESS;
2773
2774 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2775 }
2776 }
2777 else
2778 {
2779 /* memory destination. */
2780
2781 uint32_t fAccess;
2782 if (pImpl->pfnLockedU16)
2783 fAccess = IEM_ACCESS_DATA_RW;
2784 else /* BT */
2785 {
2786 IEMOP_HLP_NO_LOCK_PREFIX();
2787 fAccess = IEM_ACCESS_DATA_R;
2788 }
2789
2790 /** @todo test negative bit offsets! */
2791 switch (pIemCpu->enmEffOpSize)
2792 {
2793 case IEMMODE_16BIT:
2794 IEM_MC_BEGIN(3, 2);
2795 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
2796 IEM_MC_ARG(uint16_t, u16Src, 1);
2797 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2798 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2799 IEM_MC_LOCAL(int16_t, i16AddrAdj);
2800
2801 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2802 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2803 IEM_MC_ASSIGN(i16AddrAdj, u16Src);
2804 IEM_MC_AND_ARG_U16(u16Src, 0x0f);
2805 IEM_MC_SAR_LOCAL_S16(i16AddrAdj, 4);
2806 IEM_MC_SAR_LOCAL_S16(i16AddrAdj, 1);
2807 IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(GCPtrEffDst, i16AddrAdj);
2808 IEM_MC_FETCH_EFLAGS(EFlags);
2809
2810 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2811 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2812 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
2813 else
2814 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
2815 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
2816
2817 IEM_MC_COMMIT_EFLAGS(EFlags);
2818 IEM_MC_ADVANCE_RIP();
2819 IEM_MC_END();
2820 return VINF_SUCCESS;
2821
2822 case IEMMODE_32BIT:
2823 IEM_MC_BEGIN(3, 2);
2824 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
2825 IEM_MC_ARG(uint32_t, u32Src, 1);
2826 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2827 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2828 IEM_MC_LOCAL(int32_t, i32AddrAdj);
2829
2830 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2831 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2832 IEM_MC_ASSIGN(i32AddrAdj, u32Src);
2833 IEM_MC_AND_ARG_U32(u32Src, 0x1f);
2834 IEM_MC_SAR_LOCAL_S32(i32AddrAdj, 5);
2835 IEM_MC_SHL_LOCAL_S32(i32AddrAdj, 2);
2836 IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(GCPtrEffDst, i32AddrAdj);
2837 IEM_MC_FETCH_EFLAGS(EFlags);
2838
2839 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2840 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2841 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
2842 else
2843 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
2844 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
2845
2846 IEM_MC_COMMIT_EFLAGS(EFlags);
2847 IEM_MC_ADVANCE_RIP();
2848 IEM_MC_END();
2849 return VINF_SUCCESS;
2850
2851 case IEMMODE_64BIT:
2852 IEM_MC_BEGIN(3, 2);
2853 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
2854 IEM_MC_ARG(uint64_t, u64Src, 1);
2855 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2856 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2857 IEM_MC_LOCAL(int64_t, i64AddrAdj);
2858
2859 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2860 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2861 IEM_MC_ASSIGN(i64AddrAdj, u64Src);
2862 IEM_MC_AND_ARG_U64(u64Src, 0x3f);
2863 IEM_MC_SAR_LOCAL_S64(i64AddrAdj, 6);
2864 IEM_MC_SHL_LOCAL_S64(i64AddrAdj, 3);
2865 IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(GCPtrEffDst, i64AddrAdj);
2866 IEM_MC_FETCH_EFLAGS(EFlags);
2867
2868 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2869 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2870 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
2871 else
2872 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
2873 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
2874
2875 IEM_MC_COMMIT_EFLAGS(EFlags);
2876 IEM_MC_ADVANCE_RIP();
2877 IEM_MC_END();
2878 return VINF_SUCCESS;
2879
2880 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2881 }
2882 }
2883}
2884
2885
2886/** Opcode 0x0f 0xa3. */
2887FNIEMOP_DEF(iemOp_bt_Ev_Gv)
2888{
2889 IEMOP_MNEMONIC("bt Gv,Mp");
2890 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bt);
2891}
2892
2893
/**
 * Common worker for iemOp_shrd_Ev_Gv_Ib and iemOp_shld_Ev_Gv_Ib.
 *
 * Decodes SHLD/SHRD Ev,Gv,Ib: the shift count is an immediate byte.  For
 * register destinations it is fetched right after ModR/M; for memory
 * destinations it follows the displacement, so it is fetched after the
 * effective address has been calculated.
 *
 * @param   pImpl   Pointer to the double-shift implementation table
 *                  (SHLD or SHRD worker functions per operand size).
 */
FNIEMOP_DEF_1(iemOpCommonShldShrd_Ib, PCIEMOPSHIFTDBLSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        uint8_t cShift; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &cShift);
        IEMOP_HLP_NO_LOCK_PREFIX();

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint16_t *,      pu16Dst,                0);
                IEM_MC_ARG(uint16_t,        u16Src,                 1);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg, /*=*/cShift, 2);
                IEM_MC_ARG(uint32_t *,      pEFlags,                3);

                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint32_t *,      pu32Dst,                0);
                IEM_MC_ARG(uint32_t,        u32Src,                 1);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg, /*=*/cShift, 2);
                IEM_MC_ARG(uint32_t *,      pEFlags,                3);

                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint64_t *,      pu64Dst,                0);
                IEM_MC_ARG(uint64_t,        u64Src,                 1);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg, /*=*/cShift, 2);
                IEM_MC_ARG(uint32_t *,      pEFlags,                3);

                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint16_t *,              pu16Dst,                0);
                IEM_MC_ARG(uint16_t,                u16Src,                 1);
                IEM_MC_ARG(uint8_t,                 cShiftArg,              2);
                IEM_MC_ARG_LOCAL_EFLAGS(            pEFlags, EFlags,        3);
                IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint32_t *,              pu32Dst,                0);
                IEM_MC_ARG(uint32_t,                u32Src,                 1);
                IEM_MC_ARG(uint8_t,                 cShiftArg,              2);
                IEM_MC_ARG_LOCAL_EFLAGS(            pEFlags, EFlags,        3);
                IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint64_t *,              pu64Dst,                0);
                IEM_MC_ARG(uint64_t,                u64Src,                 1);
                IEM_MC_ARG(uint8_t,                 cShiftArg,              2);
                IEM_MC_ARG_LOCAL_EFLAGS(            pEFlags, EFlags,        3);
                IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
3037
3038
3039/**
3040 * Common worker for iemOp_shrd_Ev_Gv_CL and iemOp_shld_Ev_Gv_CL.
3041 */
3042FNIEMOP_DEF_1(iemOpCommonShldShrd_CL, PCIEMOPSHIFTDBLSIZES, pImpl)
3043{
3044 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
3045 IEMOP_HLP_NO_LOCK_PREFIX();
3046 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);
3047
3048 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3049 {
3050 IEMOP_HLP_NO_LOCK_PREFIX();
3051
3052 switch (pIemCpu->enmEffOpSize)
3053 {
3054 case IEMMODE_16BIT:
3055 IEM_MC_BEGIN(4, 0);
3056 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3057 IEM_MC_ARG(uint16_t, u16Src, 1);
3058 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3059 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3060
3061 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3062 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3063 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3064 IEM_MC_REF_EFLAGS(pEFlags);
3065 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
3066
3067 IEM_MC_ADVANCE_RIP();
3068 IEM_MC_END();
3069 return VINF_SUCCESS;
3070
3071 case IEMMODE_32BIT:
3072 IEM_MC_BEGIN(4, 0);
3073 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3074 IEM_MC_ARG(uint32_t, u32Src, 1);
3075 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3076 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3077
3078 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3079 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3080 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3081 IEM_MC_REF_EFLAGS(pEFlags);
3082 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
3083
3084 IEM_MC_ADVANCE_RIP();
3085 IEM_MC_END();
3086 return VINF_SUCCESS;
3087
3088 case IEMMODE_64BIT:
3089 IEM_MC_BEGIN(4, 0);
3090 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3091 IEM_MC_ARG(uint64_t, u64Src, 1);
3092 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3093 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3094
3095 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3096 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3097 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3098 IEM_MC_REF_EFLAGS(pEFlags);
3099 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
3100
3101 IEM_MC_ADVANCE_RIP();
3102 IEM_MC_END();
3103 return VINF_SUCCESS;
3104
3105 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3106 }
3107 }
3108 else
3109 {
3110 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
3111
3112 switch (pIemCpu->enmEffOpSize)
3113 {
3114 case IEMMODE_16BIT:
3115 IEM_MC_BEGIN(4, 2);
3116 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3117 IEM_MC_ARG(uint16_t, u16Src, 1);
3118 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3119 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3120 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3121
3122 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3123 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3124 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3125 IEM_MC_FETCH_EFLAGS(EFlags);
3126 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3127 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
3128
3129 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
3130 IEM_MC_COMMIT_EFLAGS(EFlags);
3131 IEM_MC_ADVANCE_RIP();
3132 IEM_MC_END();
3133 return VINF_SUCCESS;
3134
3135 case IEMMODE_32BIT:
3136 IEM_MC_BEGIN(4, 2);
3137 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3138 IEM_MC_ARG(uint32_t, u32Src, 1);
3139 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3140 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3141 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3142
3143 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3144 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3145 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3146 IEM_MC_FETCH_EFLAGS(EFlags);
3147 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3148 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
3149
3150 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
3151 IEM_MC_COMMIT_EFLAGS(EFlags);
3152 IEM_MC_ADVANCE_RIP();
3153 IEM_MC_END();
3154 return VINF_SUCCESS;
3155
3156 case IEMMODE_64BIT:
3157 IEM_MC_BEGIN(4, 2);
3158 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3159 IEM_MC_ARG(uint64_t, u64Src, 1);
3160 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3161 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3162 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3163
3164 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3165 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3166 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3167 IEM_MC_FETCH_EFLAGS(EFlags);
3168 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3169 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
3170
3171 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
3172 IEM_MC_COMMIT_EFLAGS(EFlags);
3173 IEM_MC_ADVANCE_RIP();
3174 IEM_MC_END();
3175 return VINF_SUCCESS;
3176
3177 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3178 }
3179 }
3180}
3181
3182
3183
/** Opcode 0x0f 0xa4. */
FNIEMOP_DEF(iemOp_shld_Ev_Gv_Ib)
{
    IEMOP_MNEMONIC("shld Ev,Gv,Ib");
    /* Defer to the common SHLD/SHRD worker; the shift count comes from an
       immediate byte, the shifted-in bits from Gv. */
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shld);
}
3190
3191
/** Opcode 0x0f 0xa5. */
FNIEMOP_DEF(iemOp_shld_Ev_Gv_CL)
{
    IEMOP_MNEMONIC("shld Ev,Gv,CL");
    /* Defer to the common SHLD/SHRD worker; the shift count is taken from CL.
       (Comment corrected: SHLD Ev,Gv,CL is opcode 0x0f 0xa5, not 0xa7.) */
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shld);
}
3198
3199
/** Opcode 0x0f 0xa8. */
FNIEMOP_DEF(iemOp_push_gs)
{
    IEMOP_MNEMONIC("push gs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Push the GS selector via the common segment-register push worker. */
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_GS);
}
3207
3208
/** Opcode 0x0f 0xa9. */
FNIEMOP_DEF(iemOp_pop_gs)
{
    IEMOP_MNEMONIC("pop gs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Segment register loads have side effects (descriptor checks, faults),
       so defer to the C implementation. */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_GS, pIemCpu->enmEffOpSize);
}
3216
3217
/** Opcode 0x0f 0xaa. RSM - not implemented yet. */
FNIEMOP_STUB(iemOp_rsm);
3220
3221
3222/** Opcode 0x0f 0xab. */
3223FNIEMOP_DEF(iemOp_bts_Ev_Gv)
3224{
3225 IEMOP_MNEMONIC("bts Gv,Mp");
3226 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bts);
3227}
3228
3229
/** Opcode 0x0f 0xac. */
FNIEMOP_DEF(iemOp_shrd_Ev_Gv_Ib)
{
    IEMOP_MNEMONIC("shrd Ev,Gv,Ib");
    /* Defer to the common SHLD/SHRD worker; the shift count comes from an
       immediate byte, the shifted-in bits from Gv. */
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shrd);
}
3236
3237
/** Opcode 0x0f 0xad. */
FNIEMOP_DEF(iemOp_shrd_Ev_Gv_CL)
{
    IEMOP_MNEMONIC("shrd Ev,Gv,CL");
    /* Defer to the common SHLD/SHRD worker; the shift count is taken from CL. */
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shrd);
}
3244
3245
/** Opcode 0x0f 0xae. Group 15 (fxsave, ldmxcsr, fences, clflush, ...) - not implemented yet. */
FNIEMOP_STUB(iemOp_Grp15);
3248
3249
/** Opcode 0x0f 0xaf. */
FNIEMOP_DEF(iemOp_imul_Gv_Ev)
{
    IEMOP_MNEMONIC("imul Gv,Ev");
    /* SF, ZF, AF and PF are architecturally undefined after two-operand
       IMUL; mark them so the verification mode does not compare them. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_imul_two);
}
3257
3258
/* CMPXCHG - not implemented yet. */
/** Opcode 0x0f 0xb0. */
FNIEMOP_STUB(iemOp_cmpxchg_Eb_Gb);
/** Opcode 0x0f 0xb1. */
FNIEMOP_STUB(iemOp_cmpxchg_Ev_Gv);
3263
3264
3265FNIEMOP_DEF_1(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg)
3266{
3267 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
3268 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
3269
3270 /* The source cannot be a register. */
3271 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3272 return IEMOP_RAISE_INVALID_OPCODE();
3273 uint8_t const iGReg = ((bRm >> X86_MODRM_REG_SHIFT) & bRm & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
3274
3275 switch (pIemCpu->enmEffOpSize)
3276 {
3277 case IEMMODE_16BIT:
3278 IEM_MC_BEGIN(5, 1);
3279 IEM_MC_ARG(uint16_t, uSel, 0);
3280 IEM_MC_ARG(uint16_t, offSeg, 1);
3281 IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
3282 IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
3283 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
3284 IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
3285 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
3286 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEff);
3287 IEM_MC_FETCH_MEM_U16(uSel, pIemCpu->iEffSeg, GCPtrEff + 2);
3288 IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
3289 IEM_MC_END();
3290 return VINF_SUCCESS;
3291
3292 case IEMMODE_32BIT:
3293 IEM_MC_BEGIN(5, 1);
3294 IEM_MC_ARG(uint16_t, uSel, 0);
3295 IEM_MC_ARG(uint32_t, offSeg, 1);
3296 IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
3297 IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
3298 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
3299 IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
3300 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
3301 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEff);
3302 IEM_MC_FETCH_MEM_U16(uSel, pIemCpu->iEffSeg, GCPtrEff + 4);
3303 IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
3304 IEM_MC_END();
3305 return VINF_SUCCESS;
3306
3307 case IEMMODE_64BIT:
3308 IEM_MC_BEGIN(5, 1);
3309 IEM_MC_ARG(uint16_t, uSel, 0);
3310 IEM_MC_ARG(uint64_t, offSeg, 1);
3311 IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
3312 IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
3313 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
3314 IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
3315 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
3316 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff);
3317 IEM_MC_FETCH_MEM_U16(uSel, pIemCpu->iEffSeg, GCPtrEff + 8);
3318 IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
3319 IEM_MC_END();
3320 return VINF_SUCCESS;
3321
3322 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3323 }
3324}
3325
3326
/** Opcode 0x0f 0xb2. */
FNIEMOP_DEF(iemOp_lss_Gv_Mp)
{
    IEMOP_MNEMONIC("lss Gv,Mp");
    /* Load far pointer into SS and a general register. */
    return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_SS);
}
3333
3334
3335/** Opcode 0x0f 0xb3. */
3336FNIEMOP_DEF(iemOp_btr_Ev_Gv)
3337{
3338 IEMOP_MNEMONIC("btr Gv,Mp");
3339 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btr);
3340}
3341
3342
/** Opcode 0x0f 0xb4. */
FNIEMOP_DEF(iemOp_lfs_Gv_Mp)
{
    IEMOP_MNEMONIC("lfs Gv,Mp");
    /* Load far pointer into FS and a general register. */
    return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_FS);
}
3349
3350
/** Opcode 0x0f 0xb5. */
FNIEMOP_DEF(iemOp_lgs_Gv_Mp)
{
    IEMOP_MNEMONIC("lgs Gv,Mp");
    /* Load far pointer into GS and a general register. */
    return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_GS);
}
3357
3358
/** Opcode 0x0f 0xb6. */
FNIEMOP_DEF(iemOp_movzx_Gv_Eb)
{
    IEMOP_MNEMONIC("movzx Gv,Eb");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register source: fetch the byte register zero extended to the
           effective operand size and store it in the destination register. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U8_ZX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U8_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U8_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        /* Fetch the byte from [iEffSeg:GCPtrEffDst], zero extend it to the
           effective operand size and store it in the destination register. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U8_ZX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U8_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
3448
3449
/** Opcode 0x0f 0xb7. */
FNIEMOP_DEF(iemOp_movzx_Gv_Ew)
{
    IEMOP_MNEMONIC("movzx Gv,Ew");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /** @todo Not entirely sure how the operand size prefix is handled here,
     *        assuming that it will be ignored. Would be nice to have a few
     *        test for this. */
    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* The same 32-bit store is used for both the 16-bit and 32-bit
           operand sizes; only 64-bit gets its own path. */
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_GREG_U16_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_GREG_U16_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
            IEM_MC_FETCH_MEM_U16_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
            IEM_MC_FETCH_MEM_U16_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    return VINF_SUCCESS;
}
3515
3516
/* Not implemented yet: */
/** Opcode 0x0f 0xb8. */
FNIEMOP_STUB(iemOp_popcnt_Gv_Ev_jmpe);
/** Opcode 0x0f 0xb9. */
FNIEMOP_STUB(iemOp_Grp10);
3521
3522
3523/** Opcode 0x0f 0xba. */
3524FNIEMOP_DEF(iemOp_Grp8)
3525{
3526 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
3527 PCIEMOPBINSIZES pImpl;
3528 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
3529 {
3530 case 0: case 1: case 2: case 3:
3531 return IEMOP_RAISE_INVALID_OPCODE();
3532 case 4: pImpl = &g_iemAImpl_bt; IEMOP_MNEMONIC("bt Ev,Ib"); break;
3533 case 5: pImpl = &g_iemAImpl_bts; IEMOP_MNEMONIC("bts Ev,Ib"); break;
3534 case 6: pImpl = &g_iemAImpl_btr; IEMOP_MNEMONIC("btr Ev,Ib"); break;
3535 case 7: pImpl = &g_iemAImpl_btc; IEMOP_MNEMONIC("btc Ev,Ib"); break;
3536 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3537 }
3538 IEMOP_HLP_NO_LOCK_PREFIX();
3539 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
3540
3541 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3542 {
3543 /* register destination. */
3544 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Bit);
3545 IEMOP_HLP_NO_LOCK_PREFIX();
3546
3547 switch (pIemCpu->enmEffOpSize)
3548 {
3549 case IEMMODE_16BIT:
3550 IEM_MC_BEGIN(3, 0);
3551 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3552 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u8Bit & 0x0f, 1);
3553 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3554
3555 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3556 IEM_MC_REF_EFLAGS(pEFlags);
3557 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
3558
3559 IEM_MC_ADVANCE_RIP();
3560 IEM_MC_END();
3561 return VINF_SUCCESS;
3562
3563 case IEMMODE_32BIT:
3564 IEM_MC_BEGIN(3, 0);
3565 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3566 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u8Bit & 0x1f, 1);
3567 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3568
3569 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3570 IEM_MC_REF_EFLAGS(pEFlags);
3571 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
3572
3573 IEM_MC_ADVANCE_RIP();
3574 IEM_MC_END();
3575 return VINF_SUCCESS;
3576
3577 case IEMMODE_64BIT:
3578 IEM_MC_BEGIN(3, 0);
3579 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3580 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u8Bit & 0x3f, 1);
3581 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3582
3583 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3584 IEM_MC_REF_EFLAGS(pEFlags);
3585 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
3586
3587 IEM_MC_ADVANCE_RIP();
3588 IEM_MC_END();
3589 return VINF_SUCCESS;
3590
3591 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3592 }
3593 }
3594 else
3595 {
3596 /* memory destination. */
3597
3598 uint32_t fAccess;
3599 if (pImpl->pfnLockedU16)
3600 fAccess = IEM_ACCESS_DATA_RW;
3601 else /* BT */
3602 {
3603 IEMOP_HLP_NO_LOCK_PREFIX();
3604 fAccess = IEM_ACCESS_DATA_R;
3605 }
3606
3607 /** @todo test negative bit offsets! */
3608 switch (pIemCpu->enmEffOpSize)
3609 {
3610 case IEMMODE_16BIT:
3611 IEM_MC_BEGIN(3, 1);
3612 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3613 IEM_MC_ARG(uint16_t, u16Src, 1);
3614 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3615 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3616
3617 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3618 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Bit);
3619 IEM_MC_ASSIGN(u16Src, u8Bit & 0x0f);
3620 IEM_MC_FETCH_EFLAGS(EFlags);
3621 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3622 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3623 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
3624 else
3625 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
3626 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
3627
3628 IEM_MC_COMMIT_EFLAGS(EFlags);
3629 IEM_MC_ADVANCE_RIP();
3630 IEM_MC_END();
3631 return VINF_SUCCESS;
3632
3633 case IEMMODE_32BIT:
3634 IEM_MC_BEGIN(3, 1);
3635 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3636 IEM_MC_ARG(uint32_t, u32Src, 1);
3637 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3638 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3639
3640 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3641 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Bit);
3642 IEM_MC_ASSIGN(u32Src, u8Bit & 0x1f);
3643 IEM_MC_FETCH_EFLAGS(EFlags);
3644 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3645 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3646 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
3647 else
3648 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
3649 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
3650
3651 IEM_MC_COMMIT_EFLAGS(EFlags);
3652 IEM_MC_ADVANCE_RIP();
3653 IEM_MC_END();
3654 return VINF_SUCCESS;
3655
3656 case IEMMODE_64BIT:
3657 IEM_MC_BEGIN(3, 1);
3658 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3659 IEM_MC_ARG(uint64_t, u64Src, 1);
3660 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3661 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3662
3663 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3664 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Bit);
3665 IEM_MC_ASSIGN(u64Src, u8Bit & 0x3f);
3666 IEM_MC_FETCH_EFLAGS(EFlags);
3667 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3668 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3669 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
3670 else
3671 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
3672 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
3673
3674 IEM_MC_COMMIT_EFLAGS(EFlags);
3675 IEM_MC_ADVANCE_RIP();
3676 IEM_MC_END();
3677 return VINF_SUCCESS;
3678
3679 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3680 }
3681 }
3682
3683}
3684
3685
3686/** Opcode 0x0f 0xbb. */
3687FNIEMOP_DEF(iemOp_btc_Ev_Gv)
3688{
3689 IEMOP_MNEMONIC("btc Gv,Mp");
3690 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btc);
3691}
3692
3693
/** Opcode 0x0f 0xbc. */
FNIEMOP_DEF(iemOp_bsf_Gv_Ev)
{
    IEMOP_MNEMONIC("bsf Gv,Ev");
    /* All flags except ZF are architecturally undefined after BSF; mark
       them so the verification mode does not compare them. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsf);
}
3701
3702
/** Opcode 0x0f 0xbd. */
FNIEMOP_DEF(iemOp_bsr_Gv_Ev)
{
    IEMOP_MNEMONIC("bsr Gv,Ev");
    /* All flags except ZF are architecturally undefined after BSR; mark
       them so the verification mode does not compare them. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsr);
}
3710
3711
/** Opcode 0x0f 0xbe. */
FNIEMOP_DEF(iemOp_movsx_Gv_Eb)
{
    IEMOP_MNEMONIC("movsx Gv,Eb");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register source: fetch the byte register sign extended to the
           effective operand size and store it in the destination register. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U8_SX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U8_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U8_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        /* Fetch the byte from [iEffSeg:GCPtrEffDst], sign extend it to the
           effective operand size and store it in the destination register. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U8_SX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U8_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U8_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
3801
3802
/** Opcode 0x0f 0xbf. */
FNIEMOP_DEF(iemOp_movsx_Gv_Ew)
{
    IEMOP_MNEMONIC("movsx Gv,Ew");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /** @todo Not entirely sure how the operand size prefix is handled here,
     *        assuming that it will be ignored. Would be nice to have a few
     *        test for this. */
    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* The same 32-bit store is used for both the 16-bit and 32-bit
           operand sizes; only 64-bit gets its own path. */
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_GREG_U16_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_GREG_U16_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
            IEM_MC_FETCH_MEM_U16_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
            IEM_MC_FETCH_MEM_U16_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    return VINF_SUCCESS;
}
3868
3869
/*
 * Opcodes 0x0f 0xc0 thru 0x0f 0xfe: XADD, SSE/SSE2/MMX packed ops, BSWAP
 * etc. - none of these are implemented yet, only stubbed so the two-byte
 * dispatch table below can be filled in.
 */
/** Opcode 0x0f 0xc0. */
FNIEMOP_STUB(iemOp_xadd_Eb_Gb);
/** Opcode 0x0f 0xc1. */
FNIEMOP_STUB(iemOp_xadd_Ev_Gv);
/** Opcode 0x0f 0xc2. */
FNIEMOP_STUB(iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib);
/** Opcode 0x0f 0xc3. */
FNIEMOP_STUB(iemOp_movnti_My_Gy);
/** Opcode 0x0f 0xc4. */
FNIEMOP_STUB(iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib);
/** Opcode 0x0f 0xc5. */
FNIEMOP_STUB(iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib);
/** Opcode 0x0f 0xc6. */
FNIEMOP_STUB(iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib);
/** Opcode 0x0f 0xc7. */
FNIEMOP_STUB(iemOp_Grp9);
/** Opcode 0x0f 0xc8. */
FNIEMOP_STUB(iemOp_bswap_rAX_r8);
/** Opcode 0x0f 0xc9. */
FNIEMOP_STUB(iemOp_bswap_rCX_r9);
/** Opcode 0x0f 0xca. */
FNIEMOP_STUB(iemOp_bswap_rDX_r10);
/** Opcode 0x0f 0xcb. */
FNIEMOP_STUB(iemOp_bswap_rBX_r11);
/** Opcode 0x0f 0xcc. */
FNIEMOP_STUB(iemOp_bswap_rSP_r12);
/** Opcode 0x0f 0xcd. */
FNIEMOP_STUB(iemOp_bswap_rBP_r13);
/** Opcode 0x0f 0xce. */
FNIEMOP_STUB(iemOp_bswap_rSI_r14);
/** Opcode 0x0f 0xcf. */
FNIEMOP_STUB(iemOp_bswap_rDI_r15);
/** Opcode 0x0f 0xd0. */
FNIEMOP_STUB(iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps);
/** Opcode 0x0f 0xd1. */
FNIEMOP_STUB(iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq);
/** Opcode 0x0f 0xd2. */
FNIEMOP_STUB(iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq);
/** Opcode 0x0f 0xd3. */
FNIEMOP_STUB(iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq);
/** Opcode 0x0f 0xd4. */
FNIEMOP_STUB(iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq);
/** Opcode 0x0f 0xd5. */
FNIEMOP_STUB(iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq);
/** Opcode 0x0f 0xd6. */
FNIEMOP_STUB(iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq);
/** Opcode 0x0f 0xd7. */
FNIEMOP_STUB(iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq);
/** Opcode 0x0f 0xd8. */
FNIEMOP_STUB(iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq);
/** Opcode 0x0f 0xd9. */
FNIEMOP_STUB(iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq);
/** Opcode 0x0f 0xda. */
FNIEMOP_STUB(iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq);
/** Opcode 0x0f 0xdb. */
FNIEMOP_STUB(iemOp_pand_Pq_Qq__pand_Vdq_Wdq);
/** Opcode 0x0f 0xdc. */
FNIEMOP_STUB(iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq);
/** Opcode 0x0f 0xdd. */
FNIEMOP_STUB(iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq);
/** Opcode 0x0f 0xde. */
FNIEMOP_STUB(iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq);
/** Opcode 0x0f 0xdf. */
FNIEMOP_STUB(iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq);
/** Opcode 0x0f 0xe0. */
FNIEMOP_STUB(iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq);
/** Opcode 0x0f 0xe1. */
FNIEMOP_STUB(iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq);
/** Opcode 0x0f 0xe2. */
FNIEMOP_STUB(iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq);
/** Opcode 0x0f 0xe3. */
FNIEMOP_STUB(iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq);
/** Opcode 0x0f 0xe4. */
FNIEMOP_STUB(iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq);
/** Opcode 0x0f 0xe5. */
FNIEMOP_STUB(iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq);
/** Opcode 0x0f 0xe6. */
FNIEMOP_STUB(iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd);
/** Opcode 0x0f 0xe7. */
FNIEMOP_STUB(iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq);
/** Opcode 0x0f 0xe8. */
FNIEMOP_STUB(iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq);
/** Opcode 0x0f 0xe9. */
FNIEMOP_STUB(iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq);
/** Opcode 0x0f 0xea. */
FNIEMOP_STUB(iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq);
/** Opcode 0x0f 0xeb. */
FNIEMOP_STUB(iemOp_por_Pq_Qq__por_Vdq_Wdq);
/** Opcode 0x0f 0xec. */
FNIEMOP_STUB(iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq);
/** Opcode 0x0f 0xed. */
FNIEMOP_STUB(iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq);
/** Opcode 0x0f 0xee. */
FNIEMOP_STUB(iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq);
/** Opcode 0x0f 0xef. */
FNIEMOP_STUB(iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq);
/** Opcode 0x0f 0xf0. */
FNIEMOP_STUB(iemOp_lddqu_Vdq_Mdq);
/** Opcode 0x0f 0xf1. */
FNIEMOP_STUB(iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq);
/** Opcode 0x0f 0xf2. */
FNIEMOP_STUB(iemOp_psld_Pq_Qq__pslld_Vdq_Wdq);
/** Opcode 0x0f 0xf3. */
FNIEMOP_STUB(iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq);
/** Opcode 0x0f 0xf4. */
FNIEMOP_STUB(iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq);
/** Opcode 0x0f 0xf5. */
FNIEMOP_STUB(iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq);
/** Opcode 0x0f 0xf6. */
FNIEMOP_STUB(iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq);
/** Opcode 0x0f 0xf7. */
FNIEMOP_STUB(iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq);
/** Opcode 0x0f 0xf8. */
FNIEMOP_STUB(iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq);
/** Opcode 0x0f 0xf9. */
FNIEMOP_STUB(iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq);
/** Opcode 0x0f 0xfa. */
FNIEMOP_STUB(iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq);
/** Opcode 0x0f 0xfb. */
FNIEMOP_STUB(iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq);
/** Opcode 0x0f 0xfc. */
FNIEMOP_STUB(iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq);
/** Opcode 0x0f 0xfd. */
FNIEMOP_STUB(iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq);
/** Opcode 0x0f 0xfe. */
FNIEMOP_STUB(iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq);
3996
3997
/**
 * Two-byte (0x0f-escaped) opcode dispatch table, indexed by the second
 * opcode byte (see iemOp_2byteEscape).
 *
 * Each entry handles every prefix variant (66h/F2h/F3h) of its opcode; the
 * long double-underscore names enumerate the operand forms covered.
 * Unassigned slots point at iemOp_Invalid.
 *
 * NOTE(review): several entry names carry typos (e.g. 'pcmped', 'psbuq',
 * 'unpckhlps', 'pslw') -- they intentionally match the FNIEMOP_STUB
 * declarations above and cannot be renamed in this table alone.
 */
const PFNIEMOP g_apfnTwoByteMap[256] =
{
    /* 0x00 */ iemOp_Grp6, iemOp_Grp7, iemOp_lar_Gv_Ew, iemOp_lsl_Gv_Ew,
    /* 0x04 */ iemOp_Invalid, iemOp_syscall, iemOp_clts, iemOp_sysret,
    /* 0x08 */ iemOp_invd, iemOp_wbinvd, iemOp_Invalid, iemOp_ud2,
    /* 0x0c */ iemOp_Invalid, iemOp_nop_Ev_prefetch, iemOp_femms, iemOp_3Dnow,
    /* 0x10 */ iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd,
    /* 0x11 */ iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd,
    /* 0x12 */ iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq,
    /* 0x13 */ iemOp_movlps_Mq_Vq__movlpd_Mq_Vq,
    /* 0x14 */ iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq,
    /* 0x15 */ iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq,
    /* 0x16 */ iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq,
    /* 0x17 */ iemOp_movhps_Mq_Vq__movhpd_Mq_Vq,
    /* 0x18 */ iemOp_prefetch_Grp16, iemOp_Invalid, iemOp_Invalid, iemOp_Invalid,
    /* 0x1c */ iemOp_Invalid, iemOp_Invalid, iemOp_Invalid, iemOp_Invalid,
    /* 0x20 */ iemOp_mov_Rd_Cd, iemOp_mov_Rd_Dd, iemOp_mov_Cd_Rd, iemOp_mov_Dd_Rd,
    /* 0x24 */ iemOp_mov_Rd_Td, iemOp_Invalid, iemOp_mov_Td_Rd, iemOp_Invalid,
    /* 0x28 */ iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd,
    /* 0x29 */ iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd,
    /* 0x2a */ iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey,
    /* 0x2b */ iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd,
    /* 0x2c */ iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd,
    /* 0x2d */ iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd,
    /* 0x2e */ iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd,
    /* 0x2f */ iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd,
    /* 0x30 */ iemOp_wrmsr, iemOp_rdtsc, iemOp_rdmsr, iemOp_rdpmc,
    /* 0x34 */ iemOp_sysenter, iemOp_sysexit, iemOp_Invalid, iemOp_getsec,
    /* 0x38 */ iemOp_3byte_Esc_A4, iemOp_Invalid, iemOp_3byte_Esc_A5, iemOp_Invalid,
    /* 0x3c */ iemOp_movnti_Gv_Ev/*?*/,iemOp_Invalid, iemOp_Invalid, iemOp_Invalid,
    /* 0x40 */ iemOp_cmovo_Gv_Ev, iemOp_cmovno_Gv_Ev, iemOp_cmovc_Gv_Ev, iemOp_cmovnc_Gv_Ev,
    /* 0x44 */ iemOp_cmove_Gv_Ev, iemOp_cmovne_Gv_Ev, iemOp_cmovbe_Gv_Ev, iemOp_cmovnbe_Gv_Ev,
    /* 0x48 */ iemOp_cmovs_Gv_Ev, iemOp_cmovns_Gv_Ev, iemOp_cmovp_Gv_Ev, iemOp_cmovnp_Gv_Ev,
    /* 0x4c */ iemOp_cmovl_Gv_Ev, iemOp_cmovnl_Gv_Ev, iemOp_cmovle_Gv_Ev, iemOp_cmovnle_Gv_Ev,
    /* 0x50 */ iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd,
    /* 0x51 */ iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd,
    /* 0x52 */ iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss,
    /* 0x53 */ iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss,
    /* 0x54 */ iemOp_andps_Vps_Wps__andpd_Wpd_Vpd,
    /* 0x55 */ iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd,
    /* 0x56 */ iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd,
    /* 0x57 */ iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd,
    /* 0x58 */ iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd,
    /* 0x59 */ iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd,
    /* 0x5a */ iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd,
    /* 0x5b */ iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps,
    /* 0x5c */ iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd,
    /* 0x5d */ iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd,
    /* 0x5e */ iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd,
    /* 0x5f */ iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd,
    /* 0x60 */ iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq,
    /* 0x61 */ iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq,
    /* 0x62 */ iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq,
    /* 0x63 */ iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq,
    /* 0x64 */ iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq,
    /* 0x65 */ iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq,
    /* 0x66 */ iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq,
    /* 0x67 */ iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq,
    /* 0x68 */ iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq,
    /* 0x69 */ iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq,
    /* 0x6a */ iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq,
    /* 0x6b */ iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq,
    /* 0x6c */ iemOp_punpcklqdq_Vdq_Wdq,
    /* 0x6d */ iemOp_punpckhqdq_Vdq_Wdq,
    /* 0x6e */ iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey,
    /* 0x6f */ iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq,
    /* 0x70 */ iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib,
    /* 0x71 */ iemOp_Grp12,
    /* 0x72 */ iemOp_Grp13,
    /* 0x73 */ iemOp_Grp14,
    /* 0x74 */ iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq,
    /* 0x75 */ iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq,
    /* 0x76 */ iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq,
    /* 0x77 */ iemOp_emms,
    /* 0x78 */ iemOp_vmread, iemOp_vmwrite, iemOp_Invalid, iemOp_Invalid,
    /* 0x7c */ iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps,
    /* 0x7d */ iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps,
    /* 0x7e */ iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq,
    /* 0x7f */ iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq,
    /* 0x80 */ iemOp_jo_Jv, iemOp_jno_Jv, iemOp_jc_Jv, iemOp_jnc_Jv,
    /* 0x84 */ iemOp_je_Jv, iemOp_jne_Jv, iemOp_jbe_Jv, iemOp_jnbe_Jv,
    /* 0x88 */ iemOp_js_Jv, iemOp_jns_Jv, iemOp_jp_Jv, iemOp_jnp_Jv,
    /* 0x8c */ iemOp_jl_Jv, iemOp_jnl_Jv, iemOp_jle_Jv, iemOp_jnle_Jv,
    /* 0x90 */ iemOp_seto_Eb, iemOp_setno_Eb, iemOp_setc_Eb, iemOp_setnc_Eb,
    /* 0x94 */ iemOp_sete_Eb, iemOp_setne_Eb, iemOp_setbe_Eb, iemOp_setnbe_Eb,
    /* 0x98 */ iemOp_sets_Eb, iemOp_setns_Eb, iemOp_setp_Eb, iemOp_setnp_Eb,
    /* 0x9c */ iemOp_setl_Eb, iemOp_setnl_Eb, iemOp_setle_Eb, iemOp_setnle_Eb,
    /* 0xa0 */ iemOp_push_fs, iemOp_pop_fs, iemOp_cpuid, iemOp_bt_Ev_Gv,
    /* 0xa4 */ iemOp_shld_Ev_Gv_Ib, iemOp_shld_Ev_Gv_CL, iemOp_Invalid, iemOp_Invalid,
    /* 0xa8 */ iemOp_push_gs, iemOp_pop_gs, iemOp_rsm, iemOp_bts_Ev_Gv,
    /* 0xac */ iemOp_shrd_Ev_Gv_Ib, iemOp_shrd_Ev_Gv_CL, iemOp_Grp15, iemOp_imul_Gv_Ev,
    /* 0xb0 */ iemOp_cmpxchg_Eb_Gb, iemOp_cmpxchg_Ev_Gv, iemOp_lss_Gv_Mp, iemOp_btr_Ev_Gv,
    /* 0xb4 */ iemOp_lfs_Gv_Mp, iemOp_lgs_Gv_Mp, iemOp_movzx_Gv_Eb, iemOp_movzx_Gv_Ew,
    /* 0xb8 */ iemOp_popcnt_Gv_Ev_jmpe,iemOp_Grp10, iemOp_Grp8, iemOp_btc_Ev_Gv,
    /* 0xbc */ iemOp_bsf_Gv_Ev, iemOp_bsr_Gv_Ev, iemOp_movsx_Gv_Eb, iemOp_movsx_Gv_Ew,
    /* 0xc0 */ iemOp_xadd_Eb_Gb,
    /* 0xc1 */ iemOp_xadd_Ev_Gv,
    /* 0xc2 */ iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib,
    /* 0xc3 */ iemOp_movnti_My_Gy,
    /* 0xc4 */ iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib,
    /* 0xc5 */ iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib,
    /* 0xc6 */ iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib,
    /* 0xc7 */ iemOp_Grp9,
    /* 0xc8 */ iemOp_bswap_rAX_r8, iemOp_bswap_rCX_r9, iemOp_bswap_rDX_r10, iemOp_bswap_rBX_r11,
    /* 0xcc */ iemOp_bswap_rSP_r12, iemOp_bswap_rBP_r13, iemOp_bswap_rSI_r14, iemOp_bswap_rDI_r15,
    /* 0xd0 */ iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps,
    /* 0xd1 */ iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq,
    /* 0xd2 */ iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq,
    /* 0xd3 */ iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq,
    /* 0xd4 */ iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq,
    /* 0xd5 */ iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq,
    /* 0xd6 */ iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq,
    /* 0xd7 */ iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq,
    /* 0xd8 */ iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq,
    /* 0xd9 */ iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq,
    /* 0xda */ iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq,
    /* 0xdb */ iemOp_pand_Pq_Qq__pand_Vdq_Wdq,
    /* 0xdc */ iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq,
    /* 0xdd */ iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq,
    /* 0xde */ iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq,
    /* 0xdf */ iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq,
    /* 0xe0 */ iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq,
    /* 0xe1 */ iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq,
    /* 0xe2 */ iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq,
    /* 0xe3 */ iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq,
    /* 0xe4 */ iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq,
    /* 0xe5 */ iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq,
    /* 0xe6 */ iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd,
    /* 0xe7 */ iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq,
    /* 0xe8 */ iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq,
    /* 0xe9 */ iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq,
    /* 0xea */ iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq,
    /* 0xeb */ iemOp_por_Pq_Qq__por_Vdq_Wdq,
    /* 0xec */ iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq,
    /* 0xed */ iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq,
    /* 0xee */ iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq,
    /* 0xef */ iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq,
    /* 0xf0 */ iemOp_lddqu_Vdq_Mdq,
    /* 0xf1 */ iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq,
    /* 0xf2 */ iemOp_psld_Pq_Qq__pslld_Vdq_Wdq,
    /* 0xf3 */ iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq,
    /* 0xf4 */ iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq,
    /* 0xf5 */ iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq,
    /* 0xf6 */ iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq,
    /* 0xf7 */ iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq,
    /* 0xf8 */ iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq,
    /* 0xf9 */ iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq,
    /* 0xfa */ iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq,
    /* 0xfb */ iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq,
    /* 0xfc */ iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq,
    /* 0xfd */ iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq,
    /* 0xfe */ iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq,
    /* 0xff */ iemOp_Invalid
};
4152
4153/** @} */
4154
4155
4156/** @name One byte opcodes.
4157 *
4158 * @{
4159 */
4160
/*
 * ADD - opcodes 0x00..0x05.  All six encodings defer to the generic
 * binary-operator decode helpers, passing the 'add' assembly worker table.
 */

/** Opcode 0x00.  ADD r/m8, r8. */
FNIEMOP_DEF(iemOp_add_Eb_Gb)
{
    IEMOP_MNEMONIC("add Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_add);
}


/** Opcode 0x01.  ADD r/m16/32/64, r16/32/64. */
FNIEMOP_DEF(iemOp_add_Ev_Gv)
{
    IEMOP_MNEMONIC("add Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_add);
}


/** Opcode 0x02.  ADD r8, r/m8. */
FNIEMOP_DEF(iemOp_add_Gb_Eb)
{
    IEMOP_MNEMONIC("add Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_add);
}


/** Opcode 0x03.  ADD r16/32/64, r/m16/32/64. */
FNIEMOP_DEF(iemOp_add_Gv_Ev)
{
    IEMOP_MNEMONIC("add Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_add);
}


/** Opcode 0x04.  ADD AL, imm8. */
FNIEMOP_DEF(iemOp_add_Al_Ib)
{
    IEMOP_MNEMONIC("add al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_add);
}


/** Opcode 0x05.  ADD rAX, imm16/32 (sign-extended for 64-bit operand size). */
FNIEMOP_DEF(iemOp_add_eAX_Iz)
{
    IEMOP_MNEMONIC("add rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_add);
}
4207
4208
/** Opcode 0x06.  PUSH ES (invalid in 64-bit mode; checked by the common worker). */
FNIEMOP_DEF(iemOp_push_ES)
{
    IEMOP_MNEMONIC("push es");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_ES);
}


/** Opcode 0x07.  POP ES - defers to the C implementation of pop Sreg. */
FNIEMOP_DEF(iemOp_pop_ES)
{
    IEMOP_MNEMONIC("pop es");
    /* NOTE(review): the NO_64BIT/NO_LOCK_PREFIX order here is swapped
       relative to iemOp_pop_SS/iemOp_pop_DS - harmless but inconsistent. */
    IEMOP_HLP_NO_64BIT();
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_ES, pIemCpu->enmEffOpSize);
}
4225
4226
/*
 * OR - opcodes 0x08..0x0d, plus PUSH CS (0x0e).  AF is architecturally
 * undefined after OR, hence the verification-mode markers.
 */

/** Opcode 0x08.  OR r/m8, r8. */
FNIEMOP_DEF(iemOp_or_Eb_Gb)
{
    IEMOP_MNEMONIC("or Eb,Gb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_or);
}


/** Opcode 0x09.  OR r/m16/32/64, r16/32/64. */
FNIEMOP_DEF(iemOp_or_Ev_Gv)
{
    IEMOP_MNEMONIC("or Ev,Gv ");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_or);
}


/** Opcode 0x0a.  OR r8, r/m8. */
FNIEMOP_DEF(iemOp_or_Gb_Eb)
{
    IEMOP_MNEMONIC("or Gb,Eb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_or);
}


/** Opcode 0x0b.  OR r16/32/64, r/m16/32/64. */
FNIEMOP_DEF(iemOp_or_Gv_Ev)
{
    IEMOP_MNEMONIC("or Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_or);
}


/** Opcode 0x0c.  OR AL, imm8. */
FNIEMOP_DEF(iemOp_or_Al_Ib)
{
    IEMOP_MNEMONIC("or al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_or);
}


/** Opcode 0x0d.  OR rAX, imm16/32. */
FNIEMOP_DEF(iemOp_or_eAX_Iz)
{
    IEMOP_MNEMONIC("or rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_or);
}


/** Opcode 0x0e.  PUSH CS (invalid in 64-bit mode; checked by the common worker). */
FNIEMOP_DEF(iemOp_push_CS)
{
    IEMOP_MNEMONIC("push cs");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_CS);
}
4287
4288
/** Opcode 0x0f.  Two-byte escape: fetch the next opcode byte and dispatch
 *  through g_apfnTwoByteMap. */
FNIEMOP_DEF(iemOp_2byteEscape)
{
    uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
    return FNIEMOP_CALL(g_apfnTwoByteMap[b]);
}
4295
/*
 * ADC (add with carry) - opcodes 0x10..0x15.
 */

/** Opcode 0x10.  ADC r/m8, r8. */
FNIEMOP_DEF(iemOp_adc_Eb_Gb)
{
    IEMOP_MNEMONIC("adc Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_adc);
}


/** Opcode 0x11.  ADC r/m16/32/64, r16/32/64. */
FNIEMOP_DEF(iemOp_adc_Ev_Gv)
{
    IEMOP_MNEMONIC("adc Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_adc);
}


/** Opcode 0x12.  ADC r8, r/m8. */
FNIEMOP_DEF(iemOp_adc_Gb_Eb)
{
    IEMOP_MNEMONIC("adc Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_adc);
}


/** Opcode 0x13.  ADC r16/32/64, r/m16/32/64. */
FNIEMOP_DEF(iemOp_adc_Gv_Ev)
{
    IEMOP_MNEMONIC("adc Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_adc);
}


/** Opcode 0x14.  ADC AL, imm8. */
FNIEMOP_DEF(iemOp_adc_Al_Ib)
{
    IEMOP_MNEMONIC("adc al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_adc);
}


/** Opcode 0x15.  ADC rAX, imm16/32. */
FNIEMOP_DEF(iemOp_adc_eAX_Iz)
{
    IEMOP_MNEMONIC("adc rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_adc);
}
4342
4343
/** Opcode 0x16.  PUSH SS (invalid in 64-bit mode; checked by the common worker). */
FNIEMOP_DEF(iemOp_push_SS)
{
    IEMOP_MNEMONIC("push ss");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_SS);
}


/** Opcode 0x17.  POP SS - defers to the C implementation of pop Sreg. */
FNIEMOP_DEF(iemOp_pop_SS)
{
    IEMOP_MNEMONIC("pop ss"); /** @todo implies instruction fusing? */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_SS, pIemCpu->enmEffOpSize);
}
4360
4361
/*
 * SBB (subtract with borrow) - opcodes 0x18..0x1d, plus PUSH/POP DS.
 */

/** Opcode 0x18.  SBB r/m8, r8. */
FNIEMOP_DEF(iemOp_sbb_Eb_Gb)
{
    IEMOP_MNEMONIC("sbb Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sbb);
}


/** Opcode 0x19.  SBB r/m16/32/64, r16/32/64. */
FNIEMOP_DEF(iemOp_sbb_Ev_Gv)
{
    IEMOP_MNEMONIC("sbb Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sbb);
}


/** Opcode 0x1a.  SBB r8, r/m8. */
FNIEMOP_DEF(iemOp_sbb_Gb_Eb)
{
    IEMOP_MNEMONIC("sbb Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sbb);
}


/** Opcode 0x1b.  SBB r16/32/64, r/m16/32/64. */
FNIEMOP_DEF(iemOp_sbb_Gv_Ev)
{
    IEMOP_MNEMONIC("sbb Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sbb);
}


/** Opcode 0x1c.  SBB AL, imm8. */
FNIEMOP_DEF(iemOp_sbb_Al_Ib)
{
    IEMOP_MNEMONIC("sbb al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sbb);
}


/** Opcode 0x1d.  SBB rAX, imm16/32. */
FNIEMOP_DEF(iemOp_sbb_eAX_Iz)
{
    IEMOP_MNEMONIC("sbb rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sbb);
}


/** Opcode 0x1e.  PUSH DS (invalid in 64-bit mode; checked by the common worker). */
FNIEMOP_DEF(iemOp_push_DS)
{
    IEMOP_MNEMONIC("push ds");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_DS);
}


/** Opcode 0x1f.  POP DS - defers to the C implementation of pop Sreg. */
FNIEMOP_DEF(iemOp_pop_DS)
{
    IEMOP_MNEMONIC("pop ds");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_DS, pIemCpu->enmEffOpSize);
}
4426
4427
/*
 * AND - opcodes 0x20..0x25 (AF undefined), ES segment override (0x26),
 * and the DAA stub (0x27).
 */

/** Opcode 0x20.  AND r/m8, r8. */
FNIEMOP_DEF(iemOp_and_Eb_Gb)
{
    IEMOP_MNEMONIC("and Eb,Gb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_and);
}


/** Opcode 0x21.  AND r/m16/32/64, r16/32/64. */
FNIEMOP_DEF(iemOp_and_Ev_Gv)
{
    IEMOP_MNEMONIC("and Ev,Gv");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_and);
}


/** Opcode 0x22.  AND r8, r/m8. */
FNIEMOP_DEF(iemOp_and_Gb_Eb)
{
    IEMOP_MNEMONIC("and Gb,Eb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_and);
}


/** Opcode 0x23.  AND r16/32/64, r/m16/32/64. */
FNIEMOP_DEF(iemOp_and_Gv_Ev)
{
    IEMOP_MNEMONIC("and Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_and);
}


/** Opcode 0x24.  AND AL, imm8. */
FNIEMOP_DEF(iemOp_and_Al_Ib)
{
    IEMOP_MNEMONIC("and al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_and);
}


/** Opcode 0x25.  AND rAX, imm16/32. */
FNIEMOP_DEF(iemOp_and_eAX_Iz)
{
    IEMOP_MNEMONIC("and rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_and);
}


/** Opcode 0x26.  ES segment override prefix: record it, then decode the
 *  following opcode byte recursively via the one-byte map. */
FNIEMOP_DEF(iemOp_seg_ES)
{
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_ES;
    pIemCpu->iEffSeg = X86_SREG_ES;

    uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}


/** Opcode 0x27.  DAA - not implemented yet (stub raises a todo assertion). */
FNIEMOP_STUB(iemOp_daa);
4495
4496
/*
 * SUB - opcodes 0x28..0x2d, CS segment override (0x2e), DAS stub (0x2f).
 */

/** Opcode 0x28.  SUB r/m8, r8. */
FNIEMOP_DEF(iemOp_sub_Eb_Gb)
{
    IEMOP_MNEMONIC("sub Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sub);
}


/** Opcode 0x29.  SUB r/m16/32/64, r16/32/64. */
FNIEMOP_DEF(iemOp_sub_Ev_Gv)
{
    IEMOP_MNEMONIC("sub Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sub);
}


/** Opcode 0x2a.  SUB r8, r/m8. */
FNIEMOP_DEF(iemOp_sub_Gb_Eb)
{
    IEMOP_MNEMONIC("sub Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sub);
}


/** Opcode 0x2b.  SUB r16/32/64, r/m16/32/64. */
FNIEMOP_DEF(iemOp_sub_Gv_Ev)
{
    IEMOP_MNEMONIC("sub Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sub);
}


/** Opcode 0x2c.  SUB AL, imm8. */
FNIEMOP_DEF(iemOp_sub_Al_Ib)
{
    IEMOP_MNEMONIC("sub al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sub);
}


/** Opcode 0x2d.  SUB rAX, imm16/32. */
FNIEMOP_DEF(iemOp_sub_eAX_Iz)
{
    IEMOP_MNEMONIC("sub rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sub);
}


/** Opcode 0x2e.  CS segment override prefix: record it, then decode the
 *  following opcode byte recursively via the one-byte map. */
FNIEMOP_DEF(iemOp_seg_CS)
{
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_CS;
    pIemCpu->iEffSeg = X86_SREG_CS;

    uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}


/** Opcode 0x2f.  DAS - not implemented yet (stub raises a todo assertion). */
FNIEMOP_STUB(iemOp_das);
4558
4559
/*
 * XOR - opcodes 0x30..0x35 (AF undefined), SS segment override (0x36),
 * AAA stub (0x37).
 */

/** Opcode 0x30.  XOR r/m8, r8. */
FNIEMOP_DEF(iemOp_xor_Eb_Gb)
{
    IEMOP_MNEMONIC("xor Eb,Gb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_xor);
}


/** Opcode 0x31.  XOR r/m16/32/64, r16/32/64. */
FNIEMOP_DEF(iemOp_xor_Ev_Gv)
{
    IEMOP_MNEMONIC("xor Ev,Gv");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_xor);
}


/** Opcode 0x32.  XOR r8, r/m8. */
FNIEMOP_DEF(iemOp_xor_Gb_Eb)
{
    IEMOP_MNEMONIC("xor Gb,Eb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_xor);
}


/** Opcode 0x33.  XOR r16/32/64, r/m16/32/64. */
FNIEMOP_DEF(iemOp_xor_Gv_Ev)
{
    IEMOP_MNEMONIC("xor Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_xor);
}


/** Opcode 0x34.  XOR AL, imm8. */
FNIEMOP_DEF(iemOp_xor_Al_Ib)
{
    IEMOP_MNEMONIC("xor al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_xor);
}


/** Opcode 0x35.  XOR rAX, imm16/32. */
FNIEMOP_DEF(iemOp_xor_eAX_Iz)
{
    IEMOP_MNEMONIC("xor rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_xor);
}


/** Opcode 0x36.  SS segment override prefix: record it, then decode the
 *  following opcode byte recursively via the one-byte map. */
FNIEMOP_DEF(iemOp_seg_SS)
{
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_SS;
    pIemCpu->iEffSeg = X86_SREG_SS;

    uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}


/** Opcode 0x37.  AAA - not implemented yet (stub raises a todo assertion). */
FNIEMOP_STUB(iemOp_aaa);
4627
4628
/*
 * CMP - opcodes 0x38..0x3d (same decode helpers as the arithmetic ops, but
 * the 'cmp' worker discards the result), DS segment override (0x3e), and
 * the AAS stub (0x3f).
 */

/** Opcode 0x38.  CMP r/m8, r8. */
FNIEMOP_DEF(iemOp_cmp_Eb_Gb)
{
    IEMOP_MNEMONIC("cmp Eb,Gb");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_cmp);
}


/** Opcode 0x39.  CMP r/m16/32/64, r16/32/64. */
FNIEMOP_DEF(iemOp_cmp_Ev_Gv)
{
    IEMOP_MNEMONIC("cmp Ev,Gv");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_cmp);
}


/** Opcode 0x3a.  CMP r8, r/m8. */
FNIEMOP_DEF(iemOp_cmp_Gb_Eb)
{
    IEMOP_MNEMONIC("cmp Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_cmp);
}


/** Opcode 0x3b.  CMP r16/32/64, r/m16/32/64. */
FNIEMOP_DEF(iemOp_cmp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmp Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_cmp);
}


/** Opcode 0x3c.  CMP AL, imm8. */
FNIEMOP_DEF(iemOp_cmp_Al_Ib)
{
    IEMOP_MNEMONIC("cmp al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_cmp);
}


/** Opcode 0x3d.  CMP rAX, imm16/32. */
FNIEMOP_DEF(iemOp_cmp_eAX_Iz)
{
    IEMOP_MNEMONIC("cmp rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_cmp);
}


/** Opcode 0x3e.  DS segment override prefix: record it, then decode the
 *  following opcode byte recursively via the one-byte map. */
FNIEMOP_DEF(iemOp_seg_DS)
{
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_DS;
    pIemCpu->iEffSeg = X86_SREG_DS;

    uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}


/** Opcode 0x3f.  AAS - not implemented yet (stub raises a todo assertion). */
FNIEMOP_STUB(iemOp_aas);
4692
/**
 * Common 'inc/dec/not/neg register' helper.
 *
 * Emits the microcode for a unary read-modify-write operation on a general
 * purpose register, sized by the current effective operand size.
 *
 * @param   pImpl   The size-variant worker function table for the operation
 *                  (inc, dec, not or neg).
 * @param   iReg    The general register index (X86_GREG_XXX); any REX
 *                  extension bit must already be merged in by the caller.
 */
FNIEMOP_DEF_2(iemOpCommonUnaryGReg, PCIEMOPUNARYSIZES, pImpl, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_ARG(uint16_t *, pu16Dst, 0);
            IEM_MC_ARG(uint32_t *, pEFlags, 1);
            IEM_MC_REF_GREG_U16(pu16Dst, iReg);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_ARG(uint32_t *, pu32Dst, 0);
            IEM_MC_ARG(uint32_t *, pEFlags, 1);
            IEM_MC_REF_GREG_U32(pu32Dst, iReg);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_ARG(uint64_t *, pu64Dst, 0);
            IEM_MC_ARG(uint32_t *, pEFlags, 1);
            IEM_MC_REF_GREG_U64(pu64Dst, iReg);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
    }
    /* Not reachable - all IEMMODE values are handled above; this return
       only quiets compilers that cannot prove switch exhaustiveness. */
    return VINF_SUCCESS;
}
4736
4737
/*
 * Opcodes 0x40..0x47: INC r16/r32 in legacy/compat mode, REX prefixes in
 * 64-bit mode.  Each handler records the REX bits it represents (R goes to
 * uRexReg, B to uRexB, X to uRexIndex; each stored pre-shifted as bit 3)
 * and then recursively decodes the following opcode byte.
 */

/** Opcode 0x40.  INC eAX, or plain REX prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_inc_eAX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX;

        uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eAX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xAX);
}


/** Opcode 0x41.  INC eCX, or REX.B prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_inc_eCX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B;
        pIemCpu->uRexB = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eCX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xCX);
}


/** Opcode 0x42.  INC eDX, or REX.X prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_inc_eDX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X;
        pIemCpu->uRexIndex = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eDX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDX);
}



/** Opcode 0x43.  INC eBX, or REX.XB prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_inc_eBX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
        pIemCpu->uRexB = 1 << 3;
        pIemCpu->uRexIndex = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eBX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBX);
}


/** Opcode 0x44.  INC eSP, or REX.R prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_inc_eSP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R;
        pIemCpu->uRexReg = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eSP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSP);
}


/** Opcode 0x45.  INC eBP, or REX.RB prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_inc_eBP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B;
        pIemCpu->uRexReg = 1 << 3;
        pIemCpu->uRexB = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eBP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBP);
}


/** Opcode 0x46.  INC eSI, or REX.RX prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_inc_eSI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X;
        pIemCpu->uRexReg = 1 << 3;
        pIemCpu->uRexIndex = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eSI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSI);
}


/** Opcode 0x47.  INC eDI, or REX.RXB prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_inc_eDI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
        pIemCpu->uRexReg = 1 << 3;
        pIemCpu->uRexB = 1 << 3;
        pIemCpu->uRexIndex = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eDI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDI);
}
4901
4902
/*
 * Opcodes 0x48..0x4f: DEC r16/r32 in legacy/compat mode, REX.W-prefix
 * combinations in 64-bit mode.  These also carry REX.W, so after recording
 * the prefix bits the effective operand size must be recalculated before
 * decoding the next opcode byte.
 */

/** Opcode 0x48.  DEC eAX, or REX.W prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eAX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_SIZE_REX_W;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eAX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xAX);
}


/** Opcode 0x49.  DEC eCX, or REX.WB prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eCX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexB = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eCX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xCX);
}


/** Opcode 0x4a.  DEC eDX, or REX.WX prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eDX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexIndex = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eDX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDX);
}


/** Opcode 0x4b.  DEC eBX, or REX.WXB prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eBX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexB = 1 << 3;
        pIemCpu->uRexIndex = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eBX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBX);
}


/** Opcode 0x4c.  DEC eSP, or REX.WR prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eSP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eSP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSP);
}


/** Opcode 0x4d.  DEC eBP, or REX.WRB prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eBP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg = 1 << 3;
        pIemCpu->uRexB = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eBP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBP);
}


/** Opcode 0x4e.  DEC eSI, or REX.WRX prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eSI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg = 1 << 3;
        pIemCpu->uRexIndex = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eSI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSI);
}


/** Opcode 0x4f.  DEC eDI, or REX.WRXB prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eDI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg = 1 << 3;
        pIemCpu->uRexB = 1 << 3;
        pIemCpu->uRexIndex = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eDI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDI);
}
5073
5074
/**
 * Common 'push register' helper.
 *
 * Emits the microcode to push a general purpose register, sized by the
 * effective operand size.
 *
 * @param   iReg    The general register index (X86_GREG_XXX) before any REX.B
 *                  extension; in 64-bit mode REX.B is merged in here.
 *
 * In 64-bit mode the default operand size is forced to 64-bit (PUSH cannot
 * take a 32-bit operand there); a 66h prefix selects 16-bit instead.
 */
FNIEMOP_DEF_1(iemOpCommonPushGReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        iReg |= pIemCpu->uRexB;
        pIemCpu->enmDefOpSize = IEMMODE_64BIT;
        pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
    }

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint16_t, u16Value);
            IEM_MC_FETCH_GREG_U16(u16Value, iReg);
            IEM_MC_PUSH_U16(u16Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_GREG_U32(u32Value, iReg);
            IEM_MC_PUSH_U32(u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_GREG_U64(u64Value, iReg);
            IEM_MC_PUSH_U64(u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
    }

    return VINF_SUCCESS;
}
5120
5121
/** Opcode 0x50 - push rAX (also r8 via REX.B, handled by the common worker). */
FNIEMOP_DEF(iemOp_push_eAX)
{
    IEMOP_MNEMONIC("push rAX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xAX);
}


/** Opcode 0x51 - push rCX. */
FNIEMOP_DEF(iemOp_push_eCX)
{
    IEMOP_MNEMONIC("push rCX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xCX);
}


/** Opcode 0x52 - push rDX. */
FNIEMOP_DEF(iemOp_push_eDX)
{
    IEMOP_MNEMONIC("push rDX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDX);
}


/** Opcode 0x53 - push rBX. */
FNIEMOP_DEF(iemOp_push_eBX)
{
    IEMOP_MNEMONIC("push rBX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBX);
}


/** Opcode 0x54 - push rSP. */
FNIEMOP_DEF(iemOp_push_eSP)
{
    IEMOP_MNEMONIC("push rSP");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSP);
}


/** Opcode 0x55 - push rBP. */
FNIEMOP_DEF(iemOp_push_eBP)
{
    IEMOP_MNEMONIC("push rBP");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBP);
}


/** Opcode 0x56 - push rSI. */
FNIEMOP_DEF(iemOp_push_eSI)
{
    IEMOP_MNEMONIC("push rSI");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSI);
}


/** Opcode 0x57 - push rDI. */
FNIEMOP_DEF(iemOp_push_eDI)
{
    IEMOP_MNEMONIC("push rDI");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDI);
}
5184
5185
/**
 * Common 'pop register' helper.
 *
 * Pops a stack item into general register @a iReg using the current effective
 * operand size.  In 64-bit mode the REX.B bit is merged into @a iReg, the
 * default operand size is forced to 64-bit, and an operand size prefix
 * selects 16-bit instead (no 32-bit pop exists in 64-bit mode).
 *
 * @param   iReg    The register index (X86_GREG_XXX).
 */
FNIEMOP_DEF_1(iemOpCommonPopGReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        iReg |= pIemCpu->uRexB;
        pIemCpu->enmDefOpSize = IEMMODE_64BIT;
        pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
    }

/** @todo How does this code handle iReg==X86_GREG_xSP. How does a real CPU
 *        handle it, for that matter (Intel pseudo code hints that the popped
 *        value is incremented by the stack item size.) Test it, both encodings
 *        and all three register sizes. */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            /* NOTE(review): the '*' is part of the name argument, so the local
               declared here is presumably a pointer (uint16_t *pu16Dst) -
               confirm IEM_MC_LOCAL pastes type and name verbatim. */
            IEM_MC_LOCAL(uint16_t, *pu16Dst);
            IEM_MC_REF_GREG_U16(pu16Dst, iReg);
            IEM_MC_POP_U16(pu16Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, *pu32Dst);
            IEM_MC_REF_GREG_U32(pu32Dst, iReg);
            IEM_MC_POP_U32(pu32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, *pu64Dst);
            IEM_MC_REF_GREG_U64(pu64Dst, iReg);
            IEM_MC_POP_U64(pu64Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
    }

    return VINF_SUCCESS;
}
5235
5236
/** Opcode 0x58 - pop rAX (also r8 via REX.B, handled by the common worker). */
FNIEMOP_DEF(iemOp_pop_eAX)
{
    IEMOP_MNEMONIC("pop rAX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xAX);
}


/** Opcode 0x59 - pop rCX. */
FNIEMOP_DEF(iemOp_pop_eCX)
{
    IEMOP_MNEMONIC("pop rCX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xCX);
}


/** Opcode 0x5a - pop rDX. */
FNIEMOP_DEF(iemOp_pop_eDX)
{
    IEMOP_MNEMONIC("pop rDX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDX);
}


/** Opcode 0x5b - pop rBX. */
FNIEMOP_DEF(iemOp_pop_eBX)
{
    IEMOP_MNEMONIC("pop rBX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBX);
}


/** Opcode 0x5c - pop rSP (see the @todo in iemOpCommonPopGReg). */
FNIEMOP_DEF(iemOp_pop_eSP)
{
    IEMOP_MNEMONIC("pop rSP");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSP);
}


/** Opcode 0x5d - pop rBP. */
FNIEMOP_DEF(iemOp_pop_eBP)
{
    IEMOP_MNEMONIC("pop rBP");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBP);
}


/** Opcode 0x5e - pop rSI. */
FNIEMOP_DEF(iemOp_pop_eSI)
{
    IEMOP_MNEMONIC("pop rSI");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSI);
}


/** Opcode 0x5f - pop rDI. */
FNIEMOP_DEF(iemOp_pop_eDI)
{
    IEMOP_MNEMONIC("pop rDI");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDI);
}
5299
5300
5301/** Opcode 0x60. */
5302FNIEMOP_DEF(iemOp_pusha)
5303{
5304 IEMOP_MNEMONIC("pusha");
5305 IEMOP_HLP_NO_64BIT();
5306 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
5307 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_16);
5308 Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
5309 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_32);
5310}
5311
5312
5313/** Opcode 0x61. */
5314FNIEMOP_DEF(iemOp_popa)
5315{
5316 IEMOP_MNEMONIC("popa");
5317 IEMOP_HLP_NO_64BIT();
5318 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
5319 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_16);
5320 Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
5321 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_32);
5322}
5323
5324
/** Opcode 0x62 - decoder stub, not implemented yet. */
FNIEMOP_STUB(iemOp_bound_Gv_Ma);
/** Opcode 0x63 - decoder stub, not implemented yet. */
FNIEMOP_STUB(iemOp_arpl_Ew_Gw);
5329
5330
5331/** Opcode 0x64. */
5332FNIEMOP_DEF(iemOp_seg_FS)
5333{
5334 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_FS;
5335 pIemCpu->iEffSeg = X86_SREG_FS;
5336
5337 uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
5338 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5339}
5340
5341
5342/** Opcode 0x65. */
5343FNIEMOP_DEF(iemOp_seg_GS)
5344{
5345 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_GS;
5346 pIemCpu->iEffSeg = X86_SREG_GS;
5347
5348 uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
5349 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5350}
5351
5352
5353/** Opcode 0x66. */
5354FNIEMOP_DEF(iemOp_op_size)
5355{
5356 pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_OP;
5357 iemRecalEffOpSize(pIemCpu);
5358
5359 uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
5360 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5361}
5362
5363
5364/** Opcode 0x67. */
5365FNIEMOP_DEF(iemOp_addr_size)
5366{
5367 pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_ADDR;
5368 switch (pIemCpu->enmDefAddrMode)
5369 {
5370 case IEMMODE_16BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
5371 case IEMMODE_32BIT: pIemCpu->enmEffAddrMode = IEMMODE_16BIT; break;
5372 case IEMMODE_64BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
5373 default: AssertFailed();
5374 }
5375
5376 uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
5377 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5378}
5379
5380
/**
 * Opcode 0x68 - push Iz.
 *
 * Pushes an immediate of the effective operand size.  For the 64-bit operand
 * size a 32-bit immediate is read and sign-extended to 64 bits.
 */
FNIEMOP_DEF(iemOp_push_Iz)
{
    IEMOP_MNEMONIC("push Iz");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U16(u16Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U32(u32Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            /* 32-bit immediate, sign-extended to 64 bits. */
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(pIemCpu, &u64Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U64(u64Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* supplies the 'default:' case */
    }
}
5424
5425
5426/** Opcode 0x69. */
5427FNIEMOP_DEF(iemOp_imul_Gv_Ev_Iz)
5428{
5429 IEMOP_MNEMONIC("imul Gv,Ev,Iz"); /* Gv = Ev * Iz; */
5430 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
5431 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
5432
5433 switch (pIemCpu->enmEffOpSize)
5434 {
5435 case IEMMODE_16BIT:
5436 {
5437 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
5438 IEMOP_HLP_NO_LOCK_PREFIX();
5439 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5440 {
5441 /* register operand */
5442 IEM_MC_BEGIN(3, 1);
5443 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5444 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm,1);
5445 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5446 IEM_MC_LOCAL(uint16_t, u16Tmp);
5447
5448 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5449 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
5450 IEM_MC_REF_EFLAGS(pEFlags);
5451 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
5452 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
5453
5454 IEM_MC_ADVANCE_RIP();
5455 IEM_MC_END();
5456 }
5457 else
5458 {
5459 /* memory operand */
5460 IEM_MC_BEGIN(3, 2);
5461 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5462 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm,1);
5463 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5464 IEM_MC_LOCAL(uint16_t, u16Tmp);
5465 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5466
5467 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5468 IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5469 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
5470 IEM_MC_REF_EFLAGS(pEFlags);
5471 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
5472 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
5473
5474 IEM_MC_ADVANCE_RIP();
5475 IEM_MC_END();
5476 }
5477 return VINF_SUCCESS;
5478 }
5479
5480 case IEMMODE_32BIT:
5481 {
5482 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
5483 IEMOP_HLP_NO_LOCK_PREFIX();
5484 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5485 {
5486 /* register operand */
5487 IEM_MC_BEGIN(3, 1);
5488 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5489 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm,1);
5490 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5491 IEM_MC_LOCAL(uint32_t, u32Tmp);
5492
5493 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5494 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
5495 IEM_MC_REF_EFLAGS(pEFlags);
5496 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
5497 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
5498
5499 IEM_MC_ADVANCE_RIP();
5500 IEM_MC_END();
5501 }
5502 else
5503 {
5504 /* memory operand */
5505 IEM_MC_BEGIN(3, 2);
5506 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5507 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm,1);
5508 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5509 IEM_MC_LOCAL(uint32_t, u32Tmp);
5510 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5511
5512 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5513 IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5514 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
5515 IEM_MC_REF_EFLAGS(pEFlags);
5516 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
5517 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
5518
5519 IEM_MC_ADVANCE_RIP();
5520 IEM_MC_END();
5521 }
5522 return VINF_SUCCESS;
5523 }
5524
5525 case IEMMODE_64BIT:
5526 {
5527 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(pIemCpu, &u64Imm);
5528 IEMOP_HLP_NO_LOCK_PREFIX();
5529 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5530 {
5531 /* register operand */
5532 IEM_MC_BEGIN(3, 1);
5533 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5534 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm,1);
5535 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5536 IEM_MC_LOCAL(uint64_t, u64Tmp);
5537
5538 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5539 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
5540 IEM_MC_REF_EFLAGS(pEFlags);
5541 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
5542 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
5543
5544 IEM_MC_ADVANCE_RIP();
5545 IEM_MC_END();
5546 }
5547 else
5548 {
5549 /* memory operand */
5550 IEM_MC_BEGIN(3, 2);
5551 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5552 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm,1);
5553 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5554 IEM_MC_LOCAL(uint64_t, u64Tmp);
5555 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5556
5557 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5558 IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5559 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
5560 IEM_MC_REF_EFLAGS(pEFlags);
5561 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
5562 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
5563
5564 IEM_MC_ADVANCE_RIP();
5565 IEM_MC_END();
5566 }
5567 return VINF_SUCCESS;
5568 }
5569 }
5570 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
5571}
5572
5573
/**
 * Opcode 0x6a - push Ib.
 *
 * The immediate is read as int8_t, so passing it to the 16/32/64-bit push
 * below sign-extends it to the effective operand size.
 */
FNIEMOP_DEF(iemOp_push_Ib)
{
    IEMOP_MNEMONIC("push Ib");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0,0);
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_PUSH_U16(i8Imm);
            break;
        case IEMMODE_32BIT:
            IEM_MC_PUSH_U32(i8Imm);
            break;
        case IEMMODE_64BIT:
            IEM_MC_PUSH_U64(i8Imm);
            break;
    }
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
5599
5600
5601/** Opcode 0x6b. */
5602FNIEMOP_DEF(iemOp_imul_Gv_Ev_Ib)
5603{
5604 IEMOP_MNEMONIC("imul Gv,Ev,Ib"); /* Gv = Ev * Iz; */
5605 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
5606 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm);
5607 IEMOP_HLP_NO_LOCK_PREFIX();
5608 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
5609
5610 switch (pIemCpu->enmEffOpSize)
5611 {
5612 case IEMMODE_16BIT:
5613 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5614 {
5615 /* register operand */
5616 IEM_MC_BEGIN(3, 1);
5617 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5618 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ (int8_t)u8Imm, 1);
5619 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5620 IEM_MC_LOCAL(uint16_t, u16Tmp);
5621
5622 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5623 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
5624 IEM_MC_REF_EFLAGS(pEFlags);
5625 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
5626 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
5627
5628 IEM_MC_ADVANCE_RIP();
5629 IEM_MC_END();
5630 }
5631 else
5632 {
5633 /* memory operand */
5634 IEM_MC_BEGIN(3, 2);
5635 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5636 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ (int8_t)u8Imm, 1);
5637 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5638 IEM_MC_LOCAL(uint16_t, u16Tmp);
5639 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5640
5641 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5642 IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5643 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
5644 IEM_MC_REF_EFLAGS(pEFlags);
5645 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
5646 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
5647
5648 IEM_MC_ADVANCE_RIP();
5649 IEM_MC_END();
5650 }
5651 return VINF_SUCCESS;
5652
5653 case IEMMODE_32BIT:
5654 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5655 {
5656 /* register operand */
5657 IEM_MC_BEGIN(3, 1);
5658 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5659 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ (int8_t)u8Imm, 1);
5660 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5661 IEM_MC_LOCAL(uint32_t, u32Tmp);
5662
5663 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5664 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
5665 IEM_MC_REF_EFLAGS(pEFlags);
5666 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
5667 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
5668
5669 IEM_MC_ADVANCE_RIP();
5670 IEM_MC_END();
5671 }
5672 else
5673 {
5674 /* memory operand */
5675 IEM_MC_BEGIN(3, 2);
5676 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5677 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ (int8_t)u8Imm, 1);
5678 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5679 IEM_MC_LOCAL(uint32_t, u32Tmp);
5680 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5681
5682 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5683 IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5684 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
5685 IEM_MC_REF_EFLAGS(pEFlags);
5686 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
5687 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
5688
5689 IEM_MC_ADVANCE_RIP();
5690 IEM_MC_END();
5691 }
5692 return VINF_SUCCESS;
5693
5694 case IEMMODE_64BIT:
5695 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5696 {
5697 /* register operand */
5698 IEM_MC_BEGIN(3, 1);
5699 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5700 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ (int8_t)u8Imm, 1);
5701 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5702 IEM_MC_LOCAL(uint64_t, u64Tmp);
5703
5704 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5705 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
5706 IEM_MC_REF_EFLAGS(pEFlags);
5707 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
5708 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
5709
5710 IEM_MC_ADVANCE_RIP();
5711 IEM_MC_END();
5712 }
5713 else
5714 {
5715 /* memory operand */
5716 IEM_MC_BEGIN(3, 2);
5717 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5718 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ (int8_t)u8Imm, 1);
5719 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5720 IEM_MC_LOCAL(uint64_t, u64Tmp);
5721 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5722
5723 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5724 IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5725 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
5726 IEM_MC_REF_EFLAGS(pEFlags);
5727 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
5728 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
5729
5730 IEM_MC_ADVANCE_RIP();
5731 IEM_MC_END();
5732 }
5733 return VINF_SUCCESS;
5734 }
5735 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
5736}
5737
5738
/**
 * Opcode 0x6c - ins Yb,DX.
 *
 * Defers to a C implementation selected by effective address size.  A REPNZ
 * prefix is treated the same as REPZ here (both select the rep worker).
 */
FNIEMOP_DEF(iemOp_insb_Yb_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep ins Yb,DX");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("ins Yb,DX");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}


/**
 * Opcode 0x6d - ins Yv,DX.
 *
 * Like 0x6c but for word/dword elements; a 64-bit operand size falls through
 * to the 32-bit workers (no 64-bit ins element size).  The inner switches all
 * return, so the trailing break statements are never reached.
 */
FNIEMOP_DEF(iemOp_inswd_Yv_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
    {
        IEMOP_MNEMONIC("rep ins Yv,DX");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_64BIT: /* 64-bit operand size uses the 32-bit workers. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("ins Yv,DX");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_64BIT: /* 64-bit operand size uses the 32-bit workers. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
5827
5828
/**
 * Opcode 0x6e - outs DX,Yb.
 *
 * Defers to a C implementation selected by effective address size, passing
 * the effective (possibly overridden) source segment.  A REPNZ prefix is
 * treated the same as REPZ here.
 */
FNIEMOP_DEF(iemOp_outsb_Yb_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep out DX,Yb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("out DX,Yb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}


/**
 * Opcode 0x6f - outs DX,Yv.
 *
 * Like 0x6e but for word/dword elements; a 64-bit operand size falls through
 * to the 32-bit workers (no 64-bit outs element size).  The inner switches
 * all return, so the trailing break statements are never reached.
 */
FNIEMOP_DEF(iemOp_outswd_Yv_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
    {
        IEMOP_MNEMONIC("rep outs DX,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_64BIT: /* 64-bit operand size uses the 32-bit workers. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("outs DX,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_64BIT: /* 64-bit operand size uses the 32-bit workers. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
5917
5918
/** Opcode 0x70 - jo Jb: jump short if OF is set. */
FNIEMOP_DEF(iemOp_jo_Jb)
{
    IEMOP_MNEMONIC("jo Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0x71 - jno Jb: jump short if OF is clear. */
FNIEMOP_DEF(iemOp_jno_Jb)
{
    IEMOP_MNEMONIC("jno Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}

/** Opcode 0x72 - jc/jb/jnae Jb: jump short if CF is set. */
FNIEMOP_DEF(iemOp_jc_Jb)
{
    IEMOP_MNEMONIC("jc/jnae Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0x73 - jnc/jnb/jae Jb: jump short if CF is clear. */
FNIEMOP_DEF(iemOp_jnc_Jb)
{
    IEMOP_MNEMONIC("jnc/jnb Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0x74 - je/jz Jb: jump short if ZF is set. */
FNIEMOP_DEF(iemOp_je_Jb)
{
    IEMOP_MNEMONIC("je/jz Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0x75 - jne/jnz Jb: jump short if ZF is clear. */
FNIEMOP_DEF(iemOp_jne_Jb)
{
    IEMOP_MNEMONIC("jne/jnz Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0x76 - jbe/jna Jb: jump short if CF or ZF is set. */
FNIEMOP_DEF(iemOp_jbe_Jb)
{
    IEMOP_MNEMONIC("jbe/jna Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0x77 - jnbe/ja Jb: jump short if both CF and ZF are clear. */
FNIEMOP_DEF(iemOp_jnbe_Jb)
{
    IEMOP_MNEMONIC("jnbe/ja Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6068
6069
6070/** Opcode 0x78. */
6071FNIEMOP_DEF(iemOp_js_Jb)
6072{
6073 IEMOP_MNEMONIC("js Jb");
6074 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
6075 IEMOP_HLP_NO_LOCK_PREFIX();
6076 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6077
6078 IEM_MC_BEGIN(0, 0);
6079 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
6080 IEM_MC_REL_JMP_S8(i8Imm);
6081 } IEM_MC_ELSE() {
6082 IEM_MC_ADVANCE_RIP();
6083 } IEM_MC_ENDIF();
6084 IEM_MC_END();
6085 return VINF_SUCCESS;
6086}
6087
6088
6089/** Opcode 0x79. */
6090FNIEMOP_DEF(iemOp_jns_Jb)
6091{
6092 IEMOP_MNEMONIC("jns Jb");
6093 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
6094 IEMOP_HLP_NO_LOCK_PREFIX();
6095 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6096
6097 IEM_MC_BEGIN(0, 0);
6098 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
6099 IEM_MC_ADVANCE_RIP();
6100 } IEM_MC_ELSE() {
6101 IEM_MC_REL_JMP_S8(i8Imm);
6102 } IEM_MC_ENDIF();
6103 IEM_MC_END();
6104 return VINF_SUCCESS;
6105}
6106
6107
6108/** Opcode 0x7a. */
6109FNIEMOP_DEF(iemOp_jp_Jb)
6110{
6111 IEMOP_MNEMONIC("jp Jb");
6112 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
6113 IEMOP_HLP_NO_LOCK_PREFIX();
6114 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6115
6116 IEM_MC_BEGIN(0, 0);
6117 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
6118 IEM_MC_REL_JMP_S8(i8Imm);
6119 } IEM_MC_ELSE() {
6120 IEM_MC_ADVANCE_RIP();
6121 } IEM_MC_ENDIF();
6122 IEM_MC_END();
6123 return VINF_SUCCESS;
6124}
6125
6126
6127/** Opcode 0x7b. */
6128FNIEMOP_DEF(iemOp_jnp_Jb)
6129{
6130 IEMOP_MNEMONIC("jnp Jb");
6131 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
6132 IEMOP_HLP_NO_LOCK_PREFIX();
6133 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6134
6135 IEM_MC_BEGIN(0, 0);
6136 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
6137 IEM_MC_ADVANCE_RIP();
6138 } IEM_MC_ELSE() {
6139 IEM_MC_REL_JMP_S8(i8Imm);
6140 } IEM_MC_ENDIF();
6141 IEM_MC_END();
6142 return VINF_SUCCESS;
6143}
6144
6145
6146/** Opcode 0x7c. */
6147FNIEMOP_DEF(iemOp_jl_Jb)
6148{
6149 IEMOP_MNEMONIC("jl/jnge Jb");
6150 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
6151 IEMOP_HLP_NO_LOCK_PREFIX();
6152 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6153
6154 IEM_MC_BEGIN(0, 0);
6155 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
6156 IEM_MC_REL_JMP_S8(i8Imm);
6157 } IEM_MC_ELSE() {
6158 IEM_MC_ADVANCE_RIP();
6159 } IEM_MC_ENDIF();
6160 IEM_MC_END();
6161 return VINF_SUCCESS;
6162}
6163
6164
6165/** Opcode 0x7d. */
6166FNIEMOP_DEF(iemOp_jnl_Jb)
6167{
6168 IEMOP_MNEMONIC("jnl/jge Jb");
6169 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
6170 IEMOP_HLP_NO_LOCK_PREFIX();
6171 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6172
6173 IEM_MC_BEGIN(0, 0);
6174 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
6175 IEM_MC_ADVANCE_RIP();
6176 } IEM_MC_ELSE() {
6177 IEM_MC_REL_JMP_S8(i8Imm);
6178 } IEM_MC_ENDIF();
6179 IEM_MC_END();
6180 return VINF_SUCCESS;
6181}
6182
6183
/** Opcode 0x7e - jle/jng Jb: jump short if less or equal (ZF set, or SF != OF). */
FNIEMOP_DEF(iemOp_jle_Jb)
{
    IEMOP_MNEMONIC("jle/jng Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Jcc uses 64-bit operand size by default in long mode. */

    IEM_MC_BEGIN(0, 0);
    /* Branch when ZF is set (equal) or SF differs from OF (signed less). */
    IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6201
6202
/** Opcode 0x7f - jnle/jg Jb: jump short if greater (ZF clear and SF == OF). */
FNIEMOP_DEF(iemOp_jnle_Jb)
{
    IEMOP_MNEMONIC("jnle/jg Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Jcc uses 64-bit operand size by default in long mode. */

    IEM_MC_BEGIN(0, 0);
    /* Inverted form of jle: the jump is taken on the ELSE arm, i.e. when ZF is clear and SF == OF. */
    IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
6220
6221
/**
 * Opcode 0x80 - Group 1: add/or/adc/sbb/and/sub/xor/cmp Eb,Ib.
 *
 * The ModR/M reg field selects the operation via g_apIemImplGrp1; the
 * mnemonic is picked out of a packed \0-separated string table by the
 * same index (each entry is 4 bytes, hence the *4).
 */
FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_80)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Eb,Ib");
    PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        /* CMP has no locked variant (pfnLockedU8 == NULL): read-only access,
           and a LOCK prefix is rejected. All other group members are RW. */
        uint32_t fAccess;
        if (pImpl->pfnLockedU8)
            fAccess = IEM_ACCESS_DATA_RW;
        else
        { /* CMP */
            IEMOP_HLP_NO_LOCK_PREFIX();
            fAccess = IEM_ACCESS_DATA_R;
        }
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        /* The immediate byte follows the ModR/M displacement bytes, so it must
           be fetched after the effective address has been decoded. */
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm);
        IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);

        IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
6280
6281
6282/** Opcode 0x81. */
6283FNIEMOP_DEF(iemOp_Grp1_Ev_Iz)
6284{
6285 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
6286 IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Iz");
6287 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
6288
6289 switch (pIemCpu->enmEffOpSize)
6290 {
6291 case IEMMODE_16BIT:
6292 {
6293 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6294 {
6295 /* register target */
6296 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
6297 IEMOP_HLP_NO_LOCK_PREFIX();
6298 IEM_MC_BEGIN(3, 0);
6299 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6300 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u16Imm, 1);
6301 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6302
6303 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6304 IEM_MC_REF_EFLAGS(pEFlags);
6305 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6306
6307 IEM_MC_ADVANCE_RIP();
6308 IEM_MC_END();
6309 }
6310 else
6311 {
6312 /* memory target */
6313 uint32_t fAccess;
6314 if (pImpl->pfnLockedU16)
6315 fAccess = IEM_ACCESS_DATA_RW;
6316 else
6317 { /* CMP, TEST */
6318 IEMOP_HLP_NO_LOCK_PREFIX();
6319 fAccess = IEM_ACCESS_DATA_R;
6320 }
6321 IEM_MC_BEGIN(3, 2);
6322 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6323 IEM_MC_ARG(uint16_t, u16Src, 1);
6324 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6325 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6326
6327 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6328 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
6329 IEM_MC_ASSIGN(u16Src, u16Imm);
6330 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6331 IEM_MC_FETCH_EFLAGS(EFlags);
6332 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6333 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6334 else
6335 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
6336
6337 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
6338 IEM_MC_COMMIT_EFLAGS(EFlags);
6339 IEM_MC_ADVANCE_RIP();
6340 IEM_MC_END();
6341 }
6342 break;
6343 }
6344
6345 case IEMMODE_32BIT:
6346 {
6347 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6348 {
6349 /* register target */
6350 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
6351 IEMOP_HLP_NO_LOCK_PREFIX();
6352 IEM_MC_BEGIN(3, 0);
6353 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6354 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u32Imm, 1);
6355 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6356
6357 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6358 IEM_MC_REF_EFLAGS(pEFlags);
6359 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6360
6361 IEM_MC_ADVANCE_RIP();
6362 IEM_MC_END();
6363 }
6364 else
6365 {
6366 /* memory target */
6367 uint32_t fAccess;
6368 if (pImpl->pfnLockedU32)
6369 fAccess = IEM_ACCESS_DATA_RW;
6370 else
6371 { /* CMP, TEST */
6372 IEMOP_HLP_NO_LOCK_PREFIX();
6373 fAccess = IEM_ACCESS_DATA_R;
6374 }
6375 IEM_MC_BEGIN(3, 2);
6376 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6377 IEM_MC_ARG(uint32_t, u32Src, 1);
6378 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6379 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6380
6381 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6382 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
6383 IEM_MC_ASSIGN(u32Src, u32Imm);
6384 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6385 IEM_MC_FETCH_EFLAGS(EFlags);
6386 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6387 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6388 else
6389 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
6390
6391 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
6392 IEM_MC_COMMIT_EFLAGS(EFlags);
6393 IEM_MC_ADVANCE_RIP();
6394 IEM_MC_END();
6395 }
6396 break;
6397 }
6398
6399 case IEMMODE_64BIT:
6400 {
6401 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6402 {
6403 /* register target */
6404 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(pIemCpu, &u64Imm);
6405 IEMOP_HLP_NO_LOCK_PREFIX();
6406 IEM_MC_BEGIN(3, 0);
6407 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6408 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u64Imm, 1);
6409 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6410
6411 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6412 IEM_MC_REF_EFLAGS(pEFlags);
6413 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6414
6415 IEM_MC_ADVANCE_RIP();
6416 IEM_MC_END();
6417 }
6418 else
6419 {
6420 /* memory target */
6421 uint32_t fAccess;
6422 if (pImpl->pfnLockedU64)
6423 fAccess = IEM_ACCESS_DATA_RW;
6424 else
6425 { /* CMP */
6426 IEMOP_HLP_NO_LOCK_PREFIX();
6427 fAccess = IEM_ACCESS_DATA_R;
6428 }
6429 IEM_MC_BEGIN(3, 2);
6430 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6431 IEM_MC_ARG(uint64_t, u64Src, 1);
6432 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6433 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6434
6435 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6436 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(pIemCpu, &u64Imm);
6437 IEM_MC_ASSIGN(u64Src, u64Imm);
6438 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6439 IEM_MC_FETCH_EFLAGS(EFlags);
6440 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6441 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6442 else
6443 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
6444
6445 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
6446 IEM_MC_COMMIT_EFLAGS(EFlags);
6447 IEM_MC_ADVANCE_RIP();
6448 IEM_MC_END();
6449 }
6450 break;
6451 }
6452 }
6453 return VINF_SUCCESS;
6454}
6455
6456
6457/** Opcode 0x82. */
6458 FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_82)
6459{
6460 IEMOP_HLP_NO_64BIT(); /** @todo do we need to decode the whole instruction or is this ok? */
6461 return FNIEMOP_CALL(iemOp_Grp1_Eb_Ib_80);
6462}
6463
6464
/**
 * Opcode 0x83 - Group 1: add/or/adc/sbb/and/sub/xor/cmp Ev,Ib.
 *
 * The imm8 is sign-extended to the effective operand size (the (int8_t)
 * casts below). The ModR/M reg field selects the operation via
 * g_apIemImplGrp1.
 */
FNIEMOP_DEF(iemOp_Grp1_Ev_Ib)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Ib");
    PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /*
         * Register target
         */
        IEMOP_HLP_NO_LOCK_PREFIX();
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm);
        /* NOTE(review): unlike iemOp_xchg_Ev_Gv, these switches have no
           IEM_NOT_REACHED_DEFAULT_CASE_RET() default — consider adding one. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ (int8_t)u8Imm,1); /* sign-extended imm8 */
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }

            case IEMMODE_32BIT:
            {
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ (int8_t)u8Imm,1); /* sign-extended imm8 */
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }

            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ (int8_t)u8Imm,1); /* sign-extended imm8 */
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }
        }
    }
    else
    {
        /*
         * Memory target.
         */
        /* The 16-bit locked pointer stands in for all sizes here: it is NULL
           only for CMP, which gets read-only access and rejects LOCK. */
        uint32_t fAccess;
        if (pImpl->pfnLockedU16)
            fAccess = IEM_ACCESS_DATA_RW;
        else
        { /* CMP */
            IEMOP_HLP_NO_LOCK_PREFIX();
            fAccess = IEM_ACCESS_DATA_R;
        }

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* The imm8 follows the displacement bytes: decode the address first. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm);
                IEM_MC_ASSIGN(u16Src, (int8_t)u8Imm);
                IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }

            case IEMMODE_32BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm);
                IEM_MC_ASSIGN(u32Src, (int8_t)u8Imm);
                IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }

            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm);
                IEM_MC_ASSIGN(u64Src, (int8_t)u8Imm);
                IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }
        }
    }
    return VINF_SUCCESS;
}
6624
6625
/** Opcode 0x84 - test Eb,Gb: byte AND for flags only, via the common rm,r8 worker. */
FNIEMOP_DEF(iemOp_test_Eb_Gb)
{
    IEMOP_MNEMONIC("test Eb,Gb");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF); /* AF is architecturally undefined after TEST. */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_test);
}
6634
6635
/** Opcode 0x85 - test Ev,Gv: word/dword/qword AND for flags only, via the common rm,rv worker. */
FNIEMOP_DEF(iemOp_test_Ev_Gv)
{
    IEMOP_MNEMONIC("test Ev,Gv");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF); /* AF is architecturally undefined after TEST. */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_test);
}
6644
6645
/**
 * Opcode 0x86 - xchg Eb,Gb.
 *
 * Register-register form swaps via two fetches and two stores; memory
 * form maps the byte read-write and swaps through iemAImpl_xchg_u8.
 */
FNIEMOP_DEF(iemOp_xchg_Eb_Gb)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_MNEMONIC("xchg Eb,Gb");

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint8_t, uTmp1);
        IEM_MC_LOCAL(uint8_t, uTmp2);

        /* Fetch both, then store crosswise. */
        IEM_MC_FETCH_GREG_U8(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_GREG_U8(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         */
        /* Note: no IEMOP_HLP_NO_LOCK_PREFIX here — a LOCK prefix is legal
           for the memory form of XCHG. */
        IEM_MC_BEGIN(2, 2);
        IEM_MC_ARG(uint8_t *, pu8Mem, 0);
        IEM_MC_ARG(uint8_t *, pu8Reg, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_MEM_MAP(pu8Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u8, pu8Mem, pu8Reg);
        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Mem, IEM_ACCESS_DATA_RW);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
6692
6693
/**
 * Opcode 0x87 - xchg Ev,Gv.
 *
 * Word/dword/qword variant of 0x86: register-register form swaps via
 * temporaries, memory form maps the operand read-write and swaps through
 * the iemAImpl_xchg_uNN helper for the effective operand size.
 */
FNIEMOP_DEF(iemOp_xchg_Ev_Gv)
{
    IEMOP_MNEMONIC("xchg Ev,Gv");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, uTmp1);
                IEM_MC_LOCAL(uint16_t, uTmp2);

                /* Fetch both, then store crosswise. */
                IEM_MC_FETCH_GREG_U16(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U16(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, uTmp1);
                IEM_MC_LOCAL(uint32_t, uTmp2);

                IEM_MC_FETCH_GREG_U32(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U32(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, uTmp1);
                IEM_MC_LOCAL(uint64_t, uTmp2);

                IEM_MC_FETCH_GREG_U64(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U64(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            /* Unreachable: every IEMMODE value is handled above. */
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're accessing memory.
         */
        /* Note: no IEMOP_HLP_NO_LOCK_PREFIX here — a LOCK prefix is legal
           for the memory form of XCHG. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint16_t *, pu16Mem, 0);
                IEM_MC_ARG(uint16_t *, pu16Reg, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu16Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u16, pu16Mem, pu16Reg);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Mem, IEM_ACCESS_DATA_RW);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint32_t *, pu32Mem, 0);
                IEM_MC_ARG(uint32_t *, pu32Reg, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu32Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u32, pu32Mem, pu32Reg);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Mem, IEM_ACCESS_DATA_RW);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint64_t *, pu64Mem, 0);
                IEM_MC_ARG(uint64_t *, pu64Reg, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu64Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u64, pu64Mem, pu64Reg);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Mem, IEM_ACCESS_DATA_RW);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            /* Unreachable: every IEMMODE value is handled above. */
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
6813
6814
/** Opcode 0x88 - mov Eb,Gb: store byte register to r/m (register or memory). */
FNIEMOP_DEF(iemOp_mov_Eb_Gb)
{
    IEMOP_MNEMONIC("mov Eb,Gb");

    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're writing a register to memory.
         */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;

}
6853
6854
/** Opcode 0x89 - mov Ev,Gv: store word/dword/qword register to r/m (register or memory). */
FNIEMOP_DEF(iemOp_mov_Ev_Gv)
{
    IEMOP_MNEMONIC("mov Ev,Gv");

    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* NOTE(review): no default case here, unlike iemOp_xchg_Ev_Gv —
           consider IEM_NOT_REACHED_DEFAULT_CASE_RET() for consistency. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're writing a register to memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
6942
6943
/** Opcode 0x8a - mov Gb,Eb: load byte register from r/m (register or memory). */
FNIEMOP_DEF(iemOp_mov_Gb_Eb)
{
    IEMOP_MNEMONIC("mov Gb,Eb");

    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
6981
6982
/** Opcode 0x8b - mov Gv,Ev: load word/dword/qword register from r/m (register or memory). */
FNIEMOP_DEF(iemOp_mov_Gv_Ev)
{
    IEMOP_MNEMONIC("mov Gv,Ev");

    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* NOTE(review): no default case here, unlike iemOp_xchg_Ev_Gv —
           consider IEM_NOT_REACHED_DEFAULT_CASE_RET() for consistency. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
7070
7071
/**
 * Opcode 0x8c - mov Ev,Sw: store a segment register to r/m.
 *
 * Register destinations honour the operand size (upper bits zeroed for
 * 32/64-bit via the ZX fetches); memory destinations are always a word
 * store regardless of operand-size prefixes.
 */
FNIEMOP_DEF(iemOp_mov_Ev_Sw)
{
    IEMOP_MNEMONIC("mov Ev,Sw");

    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * Check that the destination register exists. The REX.R prefix is ignored.
     */
    uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
    if (   iSegReg > X86_SREG_GS)
        return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     * In that case, the operand size is respected and the upper bits are
     * cleared (starting with some pentium).
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_SREG_ZX_U32(u32Value, iSegReg); /* zero-extends the selector */
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_SREG_ZX_U64(u64Value, iSegReg); /* zero-extends the selector */
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're saving the register to memory. The access is word sized
         * regardless of operand size prefixes.
         */
#if 0 /* not necessary */
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
#endif
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
7145
7146
7147
7148
/**
 * Opcode 0x8d - lea Gv,M.
 *
 * Calculates the effective address of the memory operand and stores it in the
 * general register selected by the reg field (with REX.R).  The value is
 * narrowed by the 16/32-bit store macros when the operand size is below 64
 * bits.  The register form of the ModR/M byte is invalid for LEA.
 */
FNIEMOP_DEF(iemOp_lea_Gv_M)
{
    IEMOP_MNEMONIC("lea Gv,M");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_LOCK_PREFIX(); /* no register form */

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
            IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, GCPtrEffSrc);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, GCPtrEffSrc);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, GCPtrEffSrc);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
    }
    /* All three effective operand sizes are handled above. */
    AssertFailedReturn(VERR_INTERNAL_ERROR_5);
}
7189
7190
/**
 * Opcode 0x8e - mov Sw,Ev.
 *
 * Loads a segment register from a 16-bit general register or from a word in
 * memory.  The actual segment register load (and, presumably, the mode
 * dependent descriptor checks - defined elsewhere) is deferred to
 * iemCImpl_load_SReg.  Loading CS this way is invalid.
 */
FNIEMOP_DEF(iemOp_mov_Sw_Ev)
{
    IEMOP_MNEMONIC("mov Sw,Ev");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * The practical operand size is 16-bit.
     */
#if 0 /* not necessary */
    pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
#endif

    /*
     * Check that the destination register exists and can be used with this
     * instruction.  The REX.R prefix is ignored.
     */
    uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
    if (   iSegReg == X86_SREG_CS /* mov cs,X is invalid */
        || iSegReg > X86_SREG_GS)
        return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(2, 0);
        IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
        IEM_MC_ARG(uint16_t, u16Value, 1);
        IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
        IEM_MC_END();
    }
    else
    {
        /*
         * We're loading the register from memory.  The access is word sized
         * regardless of operand size prefixes.
         */
        IEM_MC_BEGIN(2, 1);
        IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
        IEM_MC_ARG(uint16_t, u16Value, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
7244
7245
7246/** Opcode 0x8f. */
7247FNIEMOP_DEF(iemOp_pop_Ev)
7248{
7249 /* This bugger is rather annoying as it requires rSP to be updated before
7250 doing the effective address calculations. Will eventually require a
7251 split between the R/M+SIB decoding and the effective address
7252 calculation - which is something that is required for any attempt at
7253 reusing this code for a recompiler. It may also be good to have if we
7254 need to delay #UD exception caused by invalid lock prefixes.
7255
7256 For now, we'll do a mostly safe interpreter-only implementation here. */
7257 /** @todo What's the deal with the 'reg' field and pop Ev? Ignorning it for
7258 * now until tests show it's checked.. */
7259 IEMOP_MNEMONIC("pop Ev");
7260 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
7261 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7262
7263 /* Register access is relatively easy and can share code. */
7264 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7265 return FNIEMOP_CALL_1(iemOpCommonPopGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7266
7267 /*
7268 * Memory target.
7269 *
7270 * Intel says that RSP is incremented before it's used in any effective
7271 * address calcuations. This means some serious extra annoyance here since
7272 * we decode and caclulate the effective address in one step and like to
7273 * delay committing registers till everything is done.
7274 *
7275 * So, we'll decode and calculate the effective address twice. This will
7276 * require some recoding if turned into a recompiler.
7277 */
7278 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* The common code does this differently. */
7279
7280 /* Calc effective address with modified ESP. */
7281 uint8_t const offOpcodeSaved = pIemCpu->offOpcode;
7282 RTGCPTR GCPtrEff;
7283 VBOXSTRICTRC rcStrict;
7284 rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, &GCPtrEff);
7285 if (rcStrict != VINF_SUCCESS)
7286 return rcStrict;
7287 pIemCpu->offOpcode = offOpcodeSaved;
7288
7289 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7290 uint64_t const RspSaved = pCtx->rsp;
7291 switch (pIemCpu->enmEffOpSize)
7292 {
7293 case IEMMODE_16BIT: iemRegAddToRsp(pCtx, 2); break;
7294 case IEMMODE_32BIT: iemRegAddToRsp(pCtx, 4); break;
7295 case IEMMODE_64BIT: iemRegAddToRsp(pCtx, 8); break;
7296 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7297 }
7298 rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, &GCPtrEff);
7299 Assert(rcStrict == VINF_SUCCESS);
7300 pCtx->rsp = RspSaved;
7301
7302 /* Perform the operation - this should be CImpl. */
7303 RTUINT64U TmpRsp;
7304 TmpRsp.u = pCtx->rsp;
7305 switch (pIemCpu->enmEffOpSize)
7306 {
7307 case IEMMODE_16BIT:
7308 {
7309 uint16_t u16Value;
7310 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
7311 if (rcStrict == VINF_SUCCESS)
7312 rcStrict = iemMemStoreDataU16(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u16Value);
7313 break;
7314 }
7315
7316 case IEMMODE_32BIT:
7317 {
7318 uint32_t u32Value;
7319 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
7320 if (rcStrict == VINF_SUCCESS)
7321 rcStrict = iemMemStoreDataU32(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u32Value);
7322 break;
7323 }
7324
7325 case IEMMODE_64BIT:
7326 {
7327 uint64_t u64Value;
7328 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
7329 if (rcStrict == VINF_SUCCESS)
7330 rcStrict = iemMemStoreDataU16(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u64Value);
7331 break;
7332 }
7333
7334 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7335 }
7336 if (rcStrict == VINF_SUCCESS)
7337 {
7338 pCtx->rsp = TmpRsp.u;
7339 iemRegUpdateRip(pIemCpu);
7340 }
7341 return rcStrict;
7342}
7343
7344
7345/**
7346 * Common 'xchg reg,rAX' helper.
7347 */
7348FNIEMOP_DEF_1(iemOpCommonXchgGRegRax, uint8_t, iReg)
7349{
7350 IEMOP_HLP_NO_LOCK_PREFIX();
7351
7352 iReg |= pIemCpu->uRexB;
7353 switch (pIemCpu->enmEffOpSize)
7354 {
7355 case IEMMODE_16BIT:
7356 IEM_MC_BEGIN(0, 2);
7357 IEM_MC_LOCAL(uint16_t, u16Tmp1);
7358 IEM_MC_LOCAL(uint16_t, u16Tmp2);
7359 IEM_MC_FETCH_GREG_U16(u16Tmp1, iReg);
7360 IEM_MC_FETCH_GREG_U16(u16Tmp2, X86_GREG_xAX);
7361 IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp1);
7362 IEM_MC_STORE_GREG_U16(iReg, u16Tmp2);
7363 IEM_MC_ADVANCE_RIP();
7364 IEM_MC_END();
7365 return VINF_SUCCESS;
7366
7367 case IEMMODE_32BIT:
7368 IEM_MC_BEGIN(0, 2);
7369 IEM_MC_LOCAL(uint32_t, u32Tmp1);
7370 IEM_MC_LOCAL(uint32_t, u32Tmp2);
7371 IEM_MC_FETCH_GREG_U32(u32Tmp1, iReg);
7372 IEM_MC_FETCH_GREG_U32(u32Tmp2, X86_GREG_xAX);
7373 IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp1);
7374 IEM_MC_STORE_GREG_U32(iReg, u32Tmp2);
7375 IEM_MC_ADVANCE_RIP();
7376 IEM_MC_END();
7377 return VINF_SUCCESS;
7378
7379 case IEMMODE_64BIT:
7380 IEM_MC_BEGIN(0, 2);
7381 IEM_MC_LOCAL(uint64_t, u64Tmp1);
7382 IEM_MC_LOCAL(uint64_t, u64Tmp2);
7383 IEM_MC_FETCH_GREG_U64(u64Tmp1, iReg);
7384 IEM_MC_FETCH_GREG_U64(u64Tmp2, X86_GREG_xAX);
7385 IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp1);
7386 IEM_MC_STORE_GREG_U64(iReg, u64Tmp2);
7387 IEM_MC_ADVANCE_RIP();
7388 IEM_MC_END();
7389 return VINF_SUCCESS;
7390
7391 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7392 }
7393}
7394
7395
7396/** Opcode 0x90. */
7397FNIEMOP_DEF(iemOp_nop)
7398{
7399 /* R8/R8D and RAX/EAX can be exchanged. */
7400 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_B)
7401 {
7402 IEMOP_MNEMONIC("xchg r8,rAX");
7403 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xAX);
7404 }
7405
7406 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
7407 IEMOP_MNEMONIC("pause");
7408 else
7409 IEMOP_MNEMONIC("nop");
7410 IEM_MC_BEGIN(0, 0);
7411 IEM_MC_ADVANCE_RIP();
7412 IEM_MC_END();
7413 return VINF_SUCCESS;
7414}
7415
7416
/** Opcode 0x91 - xchg rCX,rAX.  Shares the common xchg-with-rAX helper. */
FNIEMOP_DEF(iemOp_xchg_eCX_eAX)
{
    IEMOP_MNEMONIC("xchg rCX,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xCX);
}
7423
7424
/** Opcode 0x92 - xchg rDX,rAX.  Shares the common xchg-with-rAX helper. */
FNIEMOP_DEF(iemOp_xchg_eDX_eAX)
{
    IEMOP_MNEMONIC("xchg rDX,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDX);
}
7431
7432
/** Opcode 0x93 - xchg rBX,rAX.  Shares the common xchg-with-rAX helper. */
FNIEMOP_DEF(iemOp_xchg_eBX_eAX)
{
    IEMOP_MNEMONIC("xchg rBX,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBX);
}
7439
7440
7441/** Opcode 0x94. */
7442FNIEMOP_DEF(iemOp_xchg_eSP_eAX)
7443{
7444 IEMOP_MNEMONIC("xchg rSX,rAX");
7445 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSP);
7446}
7447
7448
/** Opcode 0x95 - xchg rBP,rAX.  Shares the common xchg-with-rAX helper. */
FNIEMOP_DEF(iemOp_xchg_eBP_eAX)
{
    IEMOP_MNEMONIC("xchg rBP,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBP);
}
7455
7456
/** Opcode 0x96 - xchg rSI,rAX.  Shares the common xchg-with-rAX helper. */
FNIEMOP_DEF(iemOp_xchg_eSI_eAX)
{
    IEMOP_MNEMONIC("xchg rSI,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSI);
}
7463
7464
/** Opcode 0x97 - xchg rDI,rAX.  Shares the common xchg-with-rAX helper. */
FNIEMOP_DEF(iemOp_xchg_eDI_eAX)
{
    IEMOP_MNEMONIC("xchg rDI,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDI);
}
7471
7472
/** Opcode 0x98 - cbw (and wider forms).  Stub: not implemented yet. */
FNIEMOP_STUB(iemOp_cbw);
7475
7476
7477/** Opcode 0x99. */
7478FNIEMOP_DEF(iemOp_cwd)
7479{
7480 IEMOP_HLP_NO_LOCK_PREFIX();
7481 switch (pIemCpu->enmEffOpSize)
7482 {
7483 case IEMMODE_16BIT:
7484 IEMOP_MNEMONIC("cwd");
7485 IEM_MC_BEGIN(0, 1);
7486 IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 15) {
7487 IEM_MC_STORE_GREG_U16(X86_GREG_xDX, UINT16_C(0xffff));
7488 } IEM_MC_ELSE() {
7489 IEM_MC_STORE_GREG_U16(X86_GREG_xDX, 0);
7490 } IEM_MC_ENDIF();
7491 IEM_MC_ADVANCE_RIP();
7492 IEM_MC_END();
7493 return VINF_SUCCESS;
7494
7495 case IEMMODE_32BIT:
7496 IEMOP_MNEMONIC("cwq");
7497 IEM_MC_BEGIN(0, 1);
7498 IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 31) {
7499 IEM_MC_STORE_GREG_U32(X86_GREG_xDX, UINT32_C(0xffffffff));
7500 } IEM_MC_ELSE() {
7501 IEM_MC_STORE_GREG_U32(X86_GREG_xDX, 0);
7502 } IEM_MC_ENDIF();
7503 IEM_MC_ADVANCE_RIP();
7504 IEM_MC_END();
7505 return VINF_SUCCESS;
7506
7507 case IEMMODE_64BIT:
7508 IEMOP_MNEMONIC("cqo");
7509 IEM_MC_BEGIN(0, 1);
7510 IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 63) {
7511 IEM_MC_STORE_GREG_U64(X86_GREG_xDX, UINT64_C(0xffffffffffffffff));
7512 } IEM_MC_ELSE() {
7513 IEM_MC_STORE_GREG_U64(X86_GREG_xDX, 0);
7514 } IEM_MC_ENDIF();
7515 IEM_MC_ADVANCE_RIP();
7516 IEM_MC_END();
7517 return VINF_SUCCESS;
7518
7519 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7520 }
7521}
7522
7523
/** Opcode 0x9a - call Ap (direct far call).  Stub: not implemented yet. */
FNIEMOP_STUB(iemOp_call_Ap);
7526
7527
/**
 * Opcode 0x9b - wait (aka fwait).
 *
 * Only raises a possible device-not-available fault or a pending x87
 * exception before advancing RIP; no other state is changed.
 */
FNIEMOP_DEF(iemOp_wait)
{
    IEMOP_MNEMONIC("wait");
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
7541
7542
/** Opcode 0x9c - pushf Fv.  Defers the whole job to the C implementation. */
FNIEMOP_DEF(iemOp_pushf_Fv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* default to 64-bit operand size in long mode */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_pushf, pIemCpu->enmEffOpSize);
}
7550
7551
/** Opcode 0x9d - popf Fv.  Defers the whole job to the C implementation. */
FNIEMOP_DEF(iemOp_popf_Fv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* default to 64-bit operand size in long mode */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_popf, pIemCpu->enmEffOpSize);
}
7559
7560
/** Opcode 0x9e - sahf.  Stub: not implemented yet. */
FNIEMOP_STUB(iemOp_sahf);
/** Opcode 0x9f - lahf.  Stub: not implemented yet. */
FNIEMOP_STUB(iemOp_lahf);
7565
7566/**
7567 * Macro used by iemOp_mov_Al_Ob, iemOp_mov_rAX_Ov, iemOp_mov_Ob_AL and
7568 * iemOp_mov_Ov_rAX to fetch the moffsXX bit of the opcode and fend of lock
7569 * prefixes. Will return on failures.
7570 * @param a_GCPtrMemOff The variable to store the offset in.
7571 */
7572#define IEMOP_FETCH_MOFFS_XX(a_GCPtrMemOff) \
7573 do \
7574 { \
7575 switch (pIemCpu->enmEffAddrMode) \
7576 { \
7577 case IEMMODE_16BIT: \
7578 { \
7579 uint16_t u16Off; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Off); \
7580 (a_GCPtrMemOff) = u16Off; \
7581 break; \
7582 } \
7583 case IEMMODE_32BIT: \
7584 { \
7585 uint32_t u32Off; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Off); \
7586 (a_GCPtrMemOff) = u32Off; \
7587 break; \
7588 } \
7589 case IEMMODE_64BIT: \
7590 IEM_OPCODE_GET_NEXT_U64(pIemCpu, &(a_GCPtrMemOff)); \
7591 break; \
7592 IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
7593 } \
7594 IEMOP_HLP_NO_LOCK_PREFIX(); \
7595 } while (0)
7596
/** Opcode 0xa0 - mov AL,Ob: load AL from the segment-relative moffs address. */
FNIEMOP_DEF(iemOp_mov_Al_Ob)
{
    /*
     * Get the offset and fend of lock prefixes.
     */
    RTGCPTR GCPtrMemOff;
    IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);

    /*
     * Fetch AL.
     */
    IEM_MC_BEGIN(0,1);
    IEM_MC_LOCAL(uint8_t, u8Tmp);
    IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
    IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
7617
7618
/** Opcode 0xa1 - mov rAX,Ov: load ax/eax/rax from the moffs address. */
FNIEMOP_DEF(iemOp_mov_rAX_Ov)
{
    /*
     * Get the offset and fend of lock prefixes.
     */
    IEMOP_MNEMONIC("mov rAX,Ov");
    RTGCPTR GCPtrMemOff;
    IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);

    /*
     * Fetch rAX.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint16_t, u16Tmp);
            IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint32_t, u32Tmp);
            IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint64_t, u64Tmp);
            IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
7664
7665
/** Opcode 0xa2 - mov Ob,AL: store AL at the segment-relative moffs address. */
FNIEMOP_DEF(iemOp_mov_Ob_AL)
{
    /*
     * Get the offset and fend of lock prefixes.
     */
    RTGCPTR GCPtrMemOff;
    IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);

    /*
     * Store AL.
     */
    IEM_MC_BEGIN(0,1);
    IEM_MC_LOCAL(uint8_t, u8Tmp);
    IEM_MC_FETCH_GREG_U8(u8Tmp, X86_GREG_xAX);
    IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrMemOff, u8Tmp);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
7686
7687
/** Opcode 0xa3 - mov Ov,rAX: store ax/eax/rax at the moffs address. */
FNIEMOP_DEF(iemOp_mov_Ov_rAX)
{
    /*
     * Get the offset and fend of lock prefixes.
     */
    RTGCPTR GCPtrMemOff;
    IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);

    /*
     * Store rAX.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint16_t, u16Tmp);
            IEM_MC_FETCH_GREG_U16(u16Tmp, X86_GREG_xAX);
            IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrMemOff, u16Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint32_t, u32Tmp);
            IEM_MC_FETCH_GREG_U32(u32Tmp, X86_GREG_xAX);
            IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrMemOff, u32Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint64_t, u64Tmp);
            IEM_MC_FETCH_GREG_U64(u64Tmp, X86_GREG_xAX);
            IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrMemOff, u64Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
7732
/** Macro used by iemOp_movsb_Xb_Yb and iemOp_movswd_Xv_Yv.
 *
 * One (non-REP) MOVS iteration: loads ValBits bits from [iEffSeg:xSI],
 * stores them at [es:xDI], then advances or retreats both index registers
 * by the element size according to EFLAGS.DF. */
#define IEM_MOVS_CASE(ValBits, AddrBits) \
    IEM_MC_BEGIN(0, 2); \
    IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
    IEM_MC_LOCAL(uint##AddrBits##_t, uAddr); \
    IEM_MC_FETCH_GREG_U##AddrBits(uAddr, X86_GREG_xSI); \
    IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
    IEM_MC_FETCH_GREG_U##AddrBits(uAddr, X86_GREG_xDI); \
    IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
    } IEM_MC_ELSE() { \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
    } IEM_MC_ENDIF(); \
    IEM_MC_ADVANCE_RIP(); \
    IEM_MC_END();
7751
7752/** Opcode 0xa4. */
7753FNIEMOP_DEF(iemOp_movsb_Xb_Yb)
7754{
7755 IEMOP_HLP_NO_LOCK_PREFIX();
7756
7757 /*
7758 * Use the C implementation if a repeat prefix is encountered.
7759 */
7760 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
7761 {
7762 IEMOP_MNEMONIC("rep movsb Xb,Yb");
7763 switch (pIemCpu->enmEffAddrMode)
7764 {
7765 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr16, pIemCpu->iEffSeg);
7766 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr32, pIemCpu->iEffSeg);
7767 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr64, pIemCpu->iEffSeg);
7768 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7769 }
7770 }
7771 IEMOP_MNEMONIC("movsb Xb,Yb");
7772
7773 /*
7774 * Sharing case implementation with movs[wdq] below.
7775 */
7776 switch (pIemCpu->enmEffAddrMode)
7777 {
7778 case IEMMODE_16BIT: IEM_MOVS_CASE(8, 16); break;
7779 case IEMMODE_32BIT: IEM_MOVS_CASE(8, 32); break;
7780 case IEMMODE_64BIT: IEM_MOVS_CASE(8, 64); break;
7781 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7782 }
7783 return VINF_SUCCESS;
7784}
7785
7786
7787/** Opcode 0xa5. */
7788FNIEMOP_DEF(iemOp_movswd_Xv_Yv)
7789{
7790 IEMOP_HLP_NO_LOCK_PREFIX();
7791
7792 /*
7793 * Use the C implementation if a repeat prefix is encountered.
7794 */
7795 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
7796 {
7797 IEMOP_MNEMONIC("rep movs Xv,Yv");
7798 switch (pIemCpu->enmEffOpSize)
7799 {
7800 case IEMMODE_16BIT:
7801 switch (pIemCpu->enmEffAddrMode)
7802 {
7803 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr16, pIemCpu->iEffSeg);
7804 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr32, pIemCpu->iEffSeg);
7805 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr64, pIemCpu->iEffSeg);
7806 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7807 }
7808 break;
7809 case IEMMODE_32BIT:
7810 switch (pIemCpu->enmEffAddrMode)
7811 {
7812 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr16, pIemCpu->iEffSeg);
7813 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr32, pIemCpu->iEffSeg);
7814 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr64, pIemCpu->iEffSeg);
7815 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7816 }
7817 case IEMMODE_64BIT:
7818 switch (pIemCpu->enmEffAddrMode)
7819 {
7820 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
7821 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr32, pIemCpu->iEffSeg);
7822 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr64, pIemCpu->iEffSeg);
7823 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7824 }
7825 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7826 }
7827 }
7828 IEMOP_MNEMONIC("movs Xv,Yv");
7829
7830 /*
7831 * Annoying double switch here.
7832 * Using ugly macro for implementing the cases, sharing it with movsb.
7833 */
7834 switch (pIemCpu->enmEffOpSize)
7835 {
7836 case IEMMODE_16BIT:
7837 switch (pIemCpu->enmEffAddrMode)
7838 {
7839 case IEMMODE_16BIT: IEM_MOVS_CASE(16, 16); break;
7840 case IEMMODE_32BIT: IEM_MOVS_CASE(16, 32); break;
7841 case IEMMODE_64BIT: IEM_MOVS_CASE(16, 64); break;
7842 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7843 }
7844 break;
7845
7846 case IEMMODE_32BIT:
7847 switch (pIemCpu->enmEffAddrMode)
7848 {
7849 case IEMMODE_16BIT: IEM_MOVS_CASE(32, 16); break;
7850 case IEMMODE_32BIT: IEM_MOVS_CASE(32, 32); break;
7851 case IEMMODE_64BIT: IEM_MOVS_CASE(32, 64); break;
7852 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7853 }
7854 break;
7855
7856 case IEMMODE_64BIT:
7857 switch (pIemCpu->enmEffAddrMode)
7858 {
7859 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
7860 case IEMMODE_32BIT: IEM_MOVS_CASE(64, 32); break;
7861 case IEMMODE_64BIT: IEM_MOVS_CASE(64, 64); break;
7862 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7863 }
7864 break;
7865 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7866 }
7867 return VINF_SUCCESS;
7868}
7869
7870#undef IEM_MOVS_CASE
7871
/** Macro used by iemOp_cmpsb_Xb_Yb and iemOp_cmpswd_Xv_Yv.
 *
 * One (non-REP) CMPS iteration: compares ValBits bits at [iEffSeg:xSI]
 * against [es:xDI] via the assembly cmp worker (updating EFLAGS), then
 * advances or retreats both index registers according to EFLAGS.DF. */
#define IEM_CMPS_CASE(ValBits, AddrBits) \
    IEM_MC_BEGIN(3, 3); \
    IEM_MC_ARG(uint##ValBits##_t *, puValue1, 0); \
    IEM_MC_ARG(uint##ValBits##_t, uValue2, 1); \
    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
    IEM_MC_LOCAL(uint##ValBits##_t, uValue1); \
    IEM_MC_LOCAL(uint##AddrBits##_t, uAddr); \
    \
    IEM_MC_FETCH_GREG_U##AddrBits(uAddr, X86_GREG_xSI); \
    IEM_MC_FETCH_MEM_U##ValBits(uValue1, pIemCpu->iEffSeg, uAddr); \
    IEM_MC_FETCH_GREG_U##AddrBits(uAddr, X86_GREG_xDI); \
    IEM_MC_FETCH_MEM_U##ValBits(uValue2, X86_SREG_ES, uAddr); \
    IEM_MC_REF_LOCAL(puValue1, uValue1); \
    IEM_MC_REF_EFLAGS(pEFlags); \
    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puValue1, uValue2, pEFlags); \
    \
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
    } IEM_MC_ELSE() { \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
    } IEM_MC_ENDIF(); \
    IEM_MC_ADVANCE_RIP(); \
    IEM_MC_END(); \
7898
7899/** Opcode 0xa6. */
7900FNIEMOP_DEF(iemOp_cmpsb_Xb_Yb)
7901{
7902 IEMOP_HLP_NO_LOCK_PREFIX();
7903
7904 /*
7905 * Use the C implementation if a repeat prefix is encountered.
7906 */
7907 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
7908 {
7909 IEMOP_MNEMONIC("repe cmps Xb,Yb");
7910 switch (pIemCpu->enmEffAddrMode)
7911 {
7912 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr16, pIemCpu->iEffSeg);
7913 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr32, pIemCpu->iEffSeg);
7914 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr64, pIemCpu->iEffSeg);
7915 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7916 }
7917 }
7918 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
7919 {
7920 IEMOP_MNEMONIC("repe cmps Xb,Yb");
7921 switch (pIemCpu->enmEffAddrMode)
7922 {
7923 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr16, pIemCpu->iEffSeg);
7924 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr32, pIemCpu->iEffSeg);
7925 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr64, pIemCpu->iEffSeg);
7926 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7927 }
7928 }
7929 IEMOP_MNEMONIC("cmps Xb,Yb");
7930
7931 /*
7932 * Sharing case implementation with cmps[wdq] below.
7933 */
7934 switch (pIemCpu->enmEffAddrMode)
7935 {
7936 case IEMMODE_16BIT: IEM_CMPS_CASE(8, 16); break;
7937 case IEMMODE_32BIT: IEM_CMPS_CASE(8, 32); break;
7938 case IEMMODE_64BIT: IEM_CMPS_CASE(8, 64); break;
7939 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7940 }
7941 return VINF_SUCCESS;
7942
7943}
7944
7945
/**
 * Opcode 0xa7 - cmps Xv,Yv (cmpsw/cmpsd/cmpsq).
 *
 * REPE/REPNE prefixed forms are deferred to the C implementation (one worker
 * per operand/address size combination); the plain form uses the shared
 * IEM_CMPS_CASE body via a double operand-size/address-size switch.
 */
FNIEMOP_DEF(iemOp_cmpswd_Xv_Yv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
    {
        IEMOP_MNEMONIC("repe cmps Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            /* NOTE(review): no break above, but every inner case returns, so the fall-through is unreachable. */
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /* op64/addr16 cannot be encoded */
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }

    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
    {
        IEMOP_MNEMONIC("repne cmps Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            /* NOTE(review): no break above, but every inner case returns, so the fall-through is unreachable. */
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /* op64/addr16 cannot be encoded */
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }

    IEMOP_MNEMONIC("cmps Xv,Yv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with cmpsb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_CMPS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_CMPS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;

}
8064
8065#undef IEM_CMPS_CASE
8066
/** Opcode 0xa8 - test al,Ib.  Shares the common AL,Ib binary-op helper. */
FNIEMOP_DEF(iemOp_test_AL_Ib)
{
    IEMOP_MNEMONIC("test al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF); /* AF is undefined after TEST */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_test);
}
8074
8075
/** Opcode 0xa9 - test rAX,Iz.  Shares the common rAX,Iz binary-op helper. */
FNIEMOP_DEF(iemOp_test_eAX_Iz)
{
    IEMOP_MNEMONIC("test rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF); /* AF is undefined after TEST */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_test);
}
8083
8084
/** Macro used by iemOp_stosb_Yb_AL and iemOp_stoswd_Yv_eAX.
 *
 * One (non-REP) STOS iteration: stores the low ValBits bits of rAX at
 * [es:xDI], then advances or retreats xDI by the element size according
 * to EFLAGS.DF. */
#define IEM_STOS_CASE(ValBits, AddrBits) \
    IEM_MC_BEGIN(0, 2); \
    IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
    IEM_MC_LOCAL(uint##AddrBits##_t, uAddr); \
    IEM_MC_FETCH_GREG_U##ValBits(uValue, X86_GREG_xAX); \
    IEM_MC_FETCH_GREG_U##AddrBits(uAddr, X86_GREG_xDI); \
    IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
    } IEM_MC_ELSE() { \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
    } IEM_MC_ENDIF(); \
    IEM_MC_ADVANCE_RIP(); \
    IEM_MC_END(); \
8100
8101/** Opcode 0xaa. */
8102FNIEMOP_DEF(iemOp_stosb_Yb_AL)
8103{
8104 IEMOP_HLP_NO_LOCK_PREFIX();
8105
8106 /*
8107 * Use the C implementation if a repeat prefix is encountered.
8108 */
8109 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
8110 {
8111 IEMOP_MNEMONIC("rep stos Yb,al");
8112 switch (pIemCpu->enmEffAddrMode)
8113 {
8114 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m16);
8115 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m32);
8116 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m64);
8117 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8118 }
8119 }
8120 IEMOP_MNEMONIC("stos Yb,al");
8121
8122 /*
8123 * Sharing case implementation with stos[wdq] below.
8124 */
8125 switch (pIemCpu->enmEffAddrMode)
8126 {
8127 case IEMMODE_16BIT: IEM_STOS_CASE(8, 16); break;
8128 case IEMMODE_32BIT: IEM_STOS_CASE(8, 32); break;
8129 case IEMMODE_64BIT: IEM_STOS_CASE(8, 64); break;
8130 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8131 }
8132 return VINF_SUCCESS;
8133}
8134
8135
8136/** Opcode 0xab. */
8137FNIEMOP_DEF(iemOp_stoswd_Yv_eAX)
8138{
8139 IEMOP_HLP_NO_LOCK_PREFIX();
8140
8141 /*
8142 * Use the C implementation if a repeat prefix is encountered.
8143 */
8144 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
8145 {
8146 IEMOP_MNEMONIC("rep stos Yv,rAX");
8147 switch (pIemCpu->enmEffOpSize)
8148 {
8149 case IEMMODE_16BIT:
8150 switch (pIemCpu->enmEffAddrMode)
8151 {
8152 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m16);
8153 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m32);
8154 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m64);
8155 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8156 }
8157 break;
8158 case IEMMODE_32BIT:
8159 switch (pIemCpu->enmEffAddrMode)
8160 {
8161 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m16);
8162 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m32);
8163 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m64);
8164 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8165 }
8166 case IEMMODE_64BIT:
8167 switch (pIemCpu->enmEffAddrMode)
8168 {
8169 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
8170 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m32);
8171 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m64);
8172 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8173 }
8174 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8175 }
8176 }
8177 IEMOP_MNEMONIC("stos Yv,rAX");
8178
8179 /*
8180 * Annoying double switch here.
8181 * Using ugly macro for implementing the cases, sharing it with stosb.
8182 */
8183 switch (pIemCpu->enmEffOpSize)
8184 {
8185 case IEMMODE_16BIT:
8186 switch (pIemCpu->enmEffAddrMode)
8187 {
8188 case IEMMODE_16BIT: IEM_STOS_CASE(16, 16); break;
8189 case IEMMODE_32BIT: IEM_STOS_CASE(16, 32); break;
8190 case IEMMODE_64BIT: IEM_STOS_CASE(16, 64); break;
8191 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8192 }
8193 break;
8194
8195 case IEMMODE_32BIT:
8196 switch (pIemCpu->enmEffAddrMode)
8197 {
8198 case IEMMODE_16BIT: IEM_STOS_CASE(32, 16); break;
8199 case IEMMODE_32BIT: IEM_STOS_CASE(32, 32); break;
8200 case IEMMODE_64BIT: IEM_STOS_CASE(32, 64); break;
8201 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8202 }
8203 break;
8204
8205 case IEMMODE_64BIT:
8206 switch (pIemCpu->enmEffAddrMode)
8207 {
8208 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
8209 case IEMMODE_32BIT: IEM_STOS_CASE(64, 32); break;
8210 case IEMMODE_64BIT: IEM_STOS_CASE(64, 64); break;
8211 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8212 }
8213 break;
8214 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8215 }
8216 return VINF_SUCCESS;
8217}
8218
8219#undef IEM_STOS_CASE
8220
/** Macro used by iemOp_lodsb_AL_Xb and iemOp_lodswd_eAX_Xv.
 *
 * Loads a ValBits-wide value from DS(or seg-override):xSI into xAX and then
 * advances/retreats xSI by ValBits/8 depending on EFLAGS.DF.  AddrBits is
 * the effective address width used for xSI. */
#define IEM_LODS_CASE(ValBits, AddrBits) \
    IEM_MC_BEGIN(0, 2); /* 0 args, 2 locals */ \
    IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
    IEM_MC_LOCAL(uint##AddrBits##_t, uAddr); \
    IEM_MC_FETCH_GREG_U##AddrBits(uAddr, X86_GREG_xSI); \
    IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
    IEM_MC_STORE_GREG_U##ValBits(X86_GREG_xAX, uValue); \
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
    } IEM_MC_ELSE() { \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
    } IEM_MC_ENDIF(); \
    IEM_MC_ADVANCE_RIP(); \
    IEM_MC_END();
8236
/** Opcode 0xac - 'lods AL,Xb' ('rep lodsb' when a REP/REPNE prefix is set). */
FNIEMOP_DEF(iemOp_lodsb_AL_Xb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep lodsb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("lodsb al,Xb");

    /*
     * Sharing case implementation with lods[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_LODS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_LODS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_LODS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
8270
8271
/**
 * Opcode 0xad - 'lods rAX,Xv' ('rep lods' when a REP/REPNE prefix is present).
 *
 * Loads AX/EAX/RAX from [seg:xSI] and steps xSI per EFLAGS.DF; REP forms are
 * deferred to C workers which receive the effective segment.
 */
FNIEMOP_DEF(iemOp_lodswd_eAX_Xv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep lods rAX,Xv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                /* NOTE(review): missing 'break' - unreachable today as all cases
                   above return, but inconsistent with the 16-bit case. */
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("lods rAX,Xv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with lodsb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_LODS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_LODS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_LODS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_LODS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_LODS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_LODS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_LODS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_LODS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
8354
8355#undef IEM_LODS_CASE
8356
/** Macro used by iemOp_scasb_AL_Xb and iemOp_scaswd_eAX_Xv.
 *
 * Compares xAX (ValBits wide) against the ES:xDI memory operand using the
 * regular CMP worker (updates EFLAGS only), then advances/retreats xDI by
 * ValBits/8 depending on EFLAGS.DF.  AddrBits is the xDI address width.
 *
 * Fix: the IEM_MC_BEGIN argument/local counts were (1, 2); the macro declares
 * three IEM_MC_ARGs (puRax, uValue, pEFlags) and one IEM_MC_LOCAL (uAddr),
 * so the correct counts are (3, 1) - cf. IEM_LODS_CASE's (0, 2). */
#define IEM_SCAS_CASE(ValBits, AddrBits) \
    IEM_MC_BEGIN(3, 1); \
    IEM_MC_ARG(uint##ValBits##_t *, puRax, 0); \
    IEM_MC_ARG(uint##ValBits##_t, uValue, 1); \
    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
    IEM_MC_LOCAL(uint##AddrBits##_t, uAddr); \
    \
    IEM_MC_FETCH_GREG_U##AddrBits(uAddr, X86_GREG_xDI); \
    IEM_MC_FETCH_MEM_U##ValBits(uValue, X86_SREG_ES, uAddr); \
    IEM_MC_REF_GREG_U##ValBits(puRax, X86_GREG_xAX); \
    IEM_MC_REF_EFLAGS(pEFlags); \
    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puRax, uValue, pEFlags); \
    \
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
    } IEM_MC_ELSE() { \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
    } IEM_MC_ENDIF(); \
    IEM_MC_ADVANCE_RIP(); \
    IEM_MC_END();
8378
/** Opcode 0xae - 'scas AL,Xb' ('repe/repne scasb' with the REP prefixes). */
FNIEMOP_DEF(iemOp_scasb_AL_Xb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
    {
        IEMOP_MNEMONIC("repe scasb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
    {
        IEMOP_MNEMONIC("repne scasb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("scasb al,Xb");

    /*
     * Sharing case implementation with scas[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_SCAS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_SCAS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_SCAS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
8423
8424
8425/** Opcode 0xaf. */
8426FNIEMOP_DEF(iemOp_scaswd_eAX_Xv)
8427{
8428 IEMOP_HLP_NO_LOCK_PREFIX();
8429
8430 /*
8431 * Use the C implementation if a repeat prefix is encountered.
8432 */
8433 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
8434 {
8435 IEMOP_MNEMONIC("repe scas rAX,Xv");
8436 switch (pIemCpu->enmEffOpSize)
8437 {
8438 case IEMMODE_16BIT:
8439 switch (pIemCpu->enmEffAddrMode)
8440 {
8441 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m16);
8442 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m32);
8443 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m64);
8444 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8445 }
8446 break;
8447 case IEMMODE_32BIT:
8448 switch (pIemCpu->enmEffAddrMode)
8449 {
8450 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m16);
8451 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m32);
8452 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m64);
8453 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8454 }
8455 case IEMMODE_64BIT:
8456 switch (pIemCpu->enmEffAddrMode)
8457 {
8458 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /** @todo It's this wrong, we can do 16-bit addressing in 64-bit mode, but not 32-bit. right? */
8459 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m32);
8460 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m64);
8461 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8462 }
8463 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8464 }
8465 }
8466 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
8467 {
8468 IEMOP_MNEMONIC("repne scas rAX,Xv");
8469 switch (pIemCpu->enmEffOpSize)
8470 {
8471 case IEMMODE_16BIT:
8472 switch (pIemCpu->enmEffAddrMode)
8473 {
8474 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m16);
8475 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m32);
8476 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m64);
8477 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8478 }
8479 break;
8480 case IEMMODE_32BIT:
8481 switch (pIemCpu->enmEffAddrMode)
8482 {
8483 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m16);
8484 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m32);
8485 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m64);
8486 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8487 }
8488 case IEMMODE_64BIT:
8489 switch (pIemCpu->enmEffAddrMode)
8490 {
8491 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
8492 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m32);
8493 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m64);
8494 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8495 }
8496 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8497 }
8498 }
8499 IEMOP_MNEMONIC("scas rAX,Xv");
8500
8501 /*
8502 * Annoying double switch here.
8503 * Using ugly macro for implementing the cases, sharing it with scasb.
8504 */
8505 switch (pIemCpu->enmEffOpSize)
8506 {
8507 case IEMMODE_16BIT:
8508 switch (pIemCpu->enmEffAddrMode)
8509 {
8510 case IEMMODE_16BIT: IEM_SCAS_CASE(16, 16); break;
8511 case IEMMODE_32BIT: IEM_SCAS_CASE(16, 32); break;
8512 case IEMMODE_64BIT: IEM_SCAS_CASE(16, 64); break;
8513 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8514 }
8515 break;
8516
8517 case IEMMODE_32BIT:
8518 switch (pIemCpu->enmEffAddrMode)
8519 {
8520 case IEMMODE_16BIT: IEM_SCAS_CASE(32, 16); break;
8521 case IEMMODE_32BIT: IEM_SCAS_CASE(32, 32); break;
8522 case IEMMODE_64BIT: IEM_SCAS_CASE(32, 64); break;
8523 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8524 }
8525 break;
8526
8527 case IEMMODE_64BIT:
8528 switch (pIemCpu->enmEffAddrMode)
8529 {
8530 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
8531 case IEMMODE_32BIT: IEM_SCAS_CASE(64, 32); break;
8532 case IEMMODE_64BIT: IEM_SCAS_CASE(64, 64); break;
8533 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8534 }
8535 break;
8536 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8537 }
8538 return VINF_SUCCESS;
8539}
8540
8541#undef IEM_SCAS_CASE
8542
8543/**
8544 * Common 'mov r8, imm8' helper.
8545 */
8546FNIEMOP_DEF_1(iemOpCommonMov_r8_Ib, uint8_t, iReg)
8547{
8548 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm);
8549 IEMOP_HLP_NO_LOCK_PREFIX();
8550
8551 IEM_MC_BEGIN(0, 1);
8552 IEM_MC_LOCAL_CONST(uint8_t, u8Value,/*=*/ u8Imm);
8553 IEM_MC_STORE_GREG_U8(iReg, u8Value);
8554 IEM_MC_ADVANCE_RIP();
8555 IEM_MC_END();
8556
8557 return VINF_SUCCESS;
8558}
8559
8560
/** Opcode 0xb0 - 'mov AL,Ib'. */
FNIEMOP_DEF(iemOp_mov_AL_Ib)
{
    IEMOP_MNEMONIC("mov AL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xAX);
}
8567
8568
/** Opcode 0xb1 - 'mov CL,Ib'.
 * NOTE(review): name lacks the mov_ prefix used by iemOp_mov_AL_Ib; renaming
 * would require touching the opcode dispatch table elsewhere. */
FNIEMOP_DEF(iemOp_CL_Ib)
{
    IEMOP_MNEMONIC("mov CL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xCX);
}
8575
8576
/** Opcode 0xb2 - 'mov DL,Ib'. */
FNIEMOP_DEF(iemOp_DL_Ib)
{
    IEMOP_MNEMONIC("mov DL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDX);
}
8583
8584
/** Opcode 0xb3 - 'mov BL,Ib'. */
FNIEMOP_DEF(iemOp_BL_Ib)
{
    IEMOP_MNEMONIC("mov BL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBX);
}
8591
8592
/** Opcode 0xb4 - 'mov AH,Ib'.
 * AH shares encoding slot 4 (xSP) when no REX prefix is present, hence the
 * X86_GREG_xSP index passed to the common worker. */
FNIEMOP_DEF(iemOp_mov_AH_Ib)
{
    IEMOP_MNEMONIC("mov AH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSP);
}
8599
8600
/** Opcode 0xb5 - 'mov CH,Ib' (CH uses encoding slot 5 / xBP sans REX). */
FNIEMOP_DEF(iemOp_CH_Ib)
{
    IEMOP_MNEMONIC("mov CH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBP);
}
8607
8608
/** Opcode 0xb6 - 'mov DH,Ib' (DH uses encoding slot 6 / xSI sans REX). */
FNIEMOP_DEF(iemOp_DH_Ib)
{
    IEMOP_MNEMONIC("mov DH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSI);
}
8615
8616
/** Opcode 0xb7 - 'mov BH,Ib' (BH uses encoding slot 7 / xDI sans REX). */
FNIEMOP_DEF(iemOp_BH_Ib)
{
    IEMOP_MNEMONIC("mov BH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDI);
}
8623
8624
8625/**
8626 * Common 'mov regX,immX' helper.
8627 */
8628FNIEMOP_DEF_1(iemOpCommonMov_Rv_Iv, uint8_t, iReg)
8629{
8630 switch (pIemCpu->enmEffOpSize)
8631 {
8632 case IEMMODE_16BIT:
8633 {
8634 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
8635 IEMOP_HLP_NO_LOCK_PREFIX();
8636
8637 IEM_MC_BEGIN(0, 1);
8638 IEM_MC_LOCAL_CONST(uint16_t, u16Value,/*=*/ u16Imm);
8639 IEM_MC_STORE_GREG_U16(iReg, u16Value);
8640 IEM_MC_ADVANCE_RIP();
8641 IEM_MC_END();
8642 break;
8643 }
8644
8645 case IEMMODE_32BIT:
8646 {
8647 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
8648 IEMOP_HLP_NO_LOCK_PREFIX();
8649
8650 IEM_MC_BEGIN(0, 1);
8651 IEM_MC_LOCAL_CONST(uint32_t, u32Value,/*=*/ u32Imm);
8652 IEM_MC_STORE_GREG_U32(iReg, u32Value);
8653 IEM_MC_ADVANCE_RIP();
8654 IEM_MC_END();
8655 break;
8656 }
8657 case IEMMODE_64BIT:
8658 {
8659 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(pIemCpu, &u64Imm);
8660 IEMOP_HLP_NO_LOCK_PREFIX();
8661
8662 IEM_MC_BEGIN(0, 1);
8663 IEM_MC_LOCAL_CONST(uint64_t, u64Value,/*=*/ u64Imm);
8664 IEM_MC_STORE_GREG_U64(iReg, u64Value);
8665 IEM_MC_ADVANCE_RIP();
8666 IEM_MC_END();
8667 break;
8668 }
8669 }
8670
8671 return VINF_SUCCESS;
8672}
8673
8674
/** Opcode 0xb8 - 'mov rAX,Iv'. */
FNIEMOP_DEF(iemOp_eAX_Iv)
{
    IEMOP_MNEMONIC("mov rAX,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xAX);
}
8681
8682
/** Opcode 0xb9 - 'mov rCX,Iv'. */
FNIEMOP_DEF(iemOp_eCX_Iv)
{
    IEMOP_MNEMONIC("mov rCX,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xCX);
}
8689
8690
/** Opcode 0xba - 'mov rDX,Iv'. */
FNIEMOP_DEF(iemOp_eDX_Iv)
{
    IEMOP_MNEMONIC("mov rDX,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDX);
}
8697
8698
/** Opcode 0xbb - 'mov rBX,Iv'. */
FNIEMOP_DEF(iemOp_eBX_Iv)
{
    IEMOP_MNEMONIC("mov rBX,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBX);
}
8705
8706
/** Opcode 0xbc - 'mov rSP,Iv'. */
FNIEMOP_DEF(iemOp_eSP_Iv)
{
    IEMOP_MNEMONIC("mov rSP,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSP);
}
8713
8714
/** Opcode 0xbd - 'mov rBP,Iv'. */
FNIEMOP_DEF(iemOp_eBP_Iv)
{
    IEMOP_MNEMONIC("mov rBP,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBP);
}
8721
8722
/** Opcode 0xbe - 'mov rSI,Iv'. */
FNIEMOP_DEF(iemOp_eSI_Iv)
{
    IEMOP_MNEMONIC("mov rSI,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSI);
}
8729
8730
/** Opcode 0xbf - 'mov rDI,Iv'. */
FNIEMOP_DEF(iemOp_eDI_Iv)
{
    IEMOP_MNEMONIC("mov rDI,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDI);
}
8737
8738
/** Opcode 0xc0 - Group 2 byte rotates/shifts with an immediate count
 * (rol/ror/rcl/rcr/shl/shr/sar Eb,Ib; /6 is undefined). */
FNIEMOP_DEF(iemOp_Grp2_Eb_Ib)
{
    /* The /reg field of ModRM selects the operation. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,Ib"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,Ib"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,Ib"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,Ib"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,Ib"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,Ib"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,Ib"); break;
        case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX(); /* /6 undefined; NOTE(review): lock-prefix raise used as stand-in - verify intended. */
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
    }
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        uint8_t cShift; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &cShift);
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory - the immediate is fetched AFTER the effective address since
           the displacement bytes precede it in the instruction stream. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t, cShiftArg, 1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        uint8_t cShift; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &cShift);
        IEM_MC_ASSIGN(cShiftArg, cShift);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
8797
8798
/** Opcode 0xc1 - Group 2 word/dword/qword rotates/shifts with an immediate
 * count (rol/ror/rcl/rcr/shl/shr/sar Ev,Ib; /6 is undefined). */
FNIEMOP_DEF(iemOp_Grp2_Ev_Ib)
{
    /* The /reg field of ModRM selects the operation. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,Ib"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,Ib"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,Ib"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,Ib"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,Ib"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,Ib"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,Ib"); break;
        case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
    }
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        uint8_t cShift; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &cShift);
        IEMOP_HLP_NO_LOCK_PREFIX();
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory - immediate fetched after the effective address since the
           displacement bytes precede it in the instruction stream. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
8934
8935
/** Opcode 0xc2 - 'retn Iw': near return popping Iw bytes of arguments. */
FNIEMOP_DEF(iemOp_retn_Iw)
{
    IEMOP_MNEMONIC("retn Iw");
    uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* near ret defaults to 64-bit operand size in long mode */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, u16Imm);
}
8945
8946
/** Opcode 0xc3 - 'retn': near return, no argument popping (cbPop = 0). */
FNIEMOP_DEF(iemOp_retn)
{
    IEMOP_MNEMONIC("retn");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, 0);
}
8955
8956
/** Opcode 0xc4 - 'les Gv,Mp': load far pointer into ES and a register. */
FNIEMOP_DEF(iemOp_les_Gv_Mp)
{
    IEMOP_MNEMONIC("les Gv,Mp");
    return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_ES);
}
8963
8964
/** Opcode 0xc5 - 'lds Gv,Mp': load far pointer into DS and a register. */
FNIEMOP_DEF(iemOp_lds_Gv_Mp)
{
    IEMOP_MNEMONIC("lds Gv,Mp");
    return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_DS);
}
8971
8972
/** Opcode 0xc6 - Group 11: 'mov Eb,Ib' is the only valid encoding (/0). */
FNIEMOP_DEF(iemOp_Grp11_Eb_Ib)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Eb,Ib in this group. */
        return IEMOP_RAISE_INVALID_LOCK_PREFIX(); /* NOTE(review): /1../7 should arguably raise #UD; verify this raise is the intended stand-in. */
    IEMOP_MNEMONIC("mov Eb,Ib");

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm);
        IEM_MC_BEGIN(0, 0);
        IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Imm);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory access - immediate follows the ModRM displacement, hence the
           fetch after IEM_MC_CALC_RM_EFF_ADDR. */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm);
        IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Imm);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
9004
9005
/** Opcode 0xc7 - Group 11: 'mov Ev,Iz' is the only valid encoding (/0).
 * In 64-bit operand size the 32-bit immediate... actually a full U64 is
 * fetched here - NOTE(review): mov Ev,Iz normally takes a sign-extended
 * 32-bit immediate; verify IEM_OPCODE_GET_NEXT_U64 performs that fetch. */
FNIEMOP_DEF(iemOp_Grp11_Ev_Iz)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Ev,Iz in this group. */
        return IEMOP_RAISE_INVALID_LOCK_PREFIX();
    IEMOP_MNEMONIC("mov Ev,Iz");

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 0);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 0);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 0);
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(pIemCpu, &u64Imm);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory access - immediate follows the ModRM displacement. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
                IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
                IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(pIemCpu, &u64Imm);
                IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
9086
9087
9088
9089
/** Opcode 0xc8 - 'enter Iw,Ib'. Not implemented yet (stub). */
FNIEMOP_STUB(iemOp_enter_Iw_Ib);
9092
9093
9094/** Opcode 0xc9. */
9095FNIEMOP_DEF(iemOp_leave)
9096{
9097 IEMOP_MNEMONIC("retn");
9098 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9099 IEMOP_HLP_NO_LOCK_PREFIX();
9100 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_leave, pIemCpu->enmEffOpSize);
9101}
9102
9103
/** Opcode 0xca - 'retf Iw': far return popping Iw bytes of arguments. */
FNIEMOP_DEF(iemOp_retf_Iw)
{
    IEMOP_MNEMONIC("retf Iw");
    uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, u16Imm);
}
9113
9114
/** Opcode 0xcb - 'retf': far return, no argument popping (cbPop = 0). */
FNIEMOP_DEF(iemOp_retf)
{
    IEMOP_MNEMONIC("retf");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, 0);
}
9123
9124
/** Opcode 0xcc - 'int3': breakpoint trap (vector 3, fIsBpInstr=true).
 * NOTE(review): unlike siblings, no IEMOP_MNEMONIC/NO_LOCK_PREFIX here. */
FNIEMOP_DEF(iemOp_int_3)
{
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, X86_XCPT_BP, true /*fIsBpInstr*/);
}
9130
9131
/** Opcode 0xcd - 'int Ib': software interrupt via the given vector. */
FNIEMOP_DEF(iemOp_int_Ib)
{
    uint8_t u8Int; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Int);
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, u8Int, false /*fIsBpInstr*/);
}
9138
9139
/** Opcode 0xce - 'into': raise #OF (vector 4) interrupt.
 * NOTE(review): the CImpl call is unconditional here, and INTO is #UD in
 * 64-bit mode - both conditions are presumably handled inside iemCImpl_int;
 * confirm.  (In the CPU, INTO only traps when EFLAGS.OF is set.) */
FNIEMOP_DEF(iemOp_into)
{
    IEM_MC_BEGIN(2, 0);
    IEM_MC_ARG_CONST(uint8_t, u8Int, /*=*/ X86_XCPT_OF, 0);
    IEM_MC_ARG_CONST(bool, fIsBpInstr, /*=*/ false, 1);
    IEM_MC_CALL_CIMPL_2(iemCImpl_int, u8Int, fIsBpInstr);
    IEM_MC_END();
    return VINF_SUCCESS;
}
9150
9151
/** Opcode 0xcf - 'iret': interrupt return, deferred to the C worker. */
FNIEMOP_DEF(iemOp_iret)
{
    IEMOP_MNEMONIC("iret");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_iret, pIemCpu->enmEffOpSize);
}
9159
9160
/** Opcode 0xd0 - Group 2 byte rotates/shifts by a fixed count of 1
 * (rol/ror/rcl/rcr/shl/shr/sar Eb,1; /6 is undefined). */
FNIEMOP_DEF(iemOp_Grp2_Eb_1)
{
    /* The /reg field of ModRM selects the operation. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,1"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,1"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,1"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,1"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,1"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,1"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,1"); break;
        case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
    }
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=*/1, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=*/1, 1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
9216
9217
9218
9219/** Opcode 0xd1. */
9220FNIEMOP_DEF(iemOp_Grp2_Ev_1)
9221{
9222 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
9223 PCIEMOPSHIFTSIZES pImpl;
9224 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9225 {
9226 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,1"); break;
9227 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,1"); break;
9228 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,1"); break;
9229 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,1"); break;
9230 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,1"); break;
9231 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,1"); break;
9232 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,1"); break;
9233 case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
9234 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
9235 }
9236 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9237
9238 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9239 {
9240 /* register */
9241 IEMOP_HLP_NO_LOCK_PREFIX();
9242 switch (pIemCpu->enmEffOpSize)
9243 {
9244 case IEMMODE_16BIT:
9245 IEM_MC_BEGIN(3, 0);
9246 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9247 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9248 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9249 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9250 IEM_MC_REF_EFLAGS(pEFlags);
9251 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9252 IEM_MC_ADVANCE_RIP();
9253 IEM_MC_END();
9254 return VINF_SUCCESS;
9255
9256 case IEMMODE_32BIT:
9257 IEM_MC_BEGIN(3, 0);
9258 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9259 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9260 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9261 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9262 IEM_MC_REF_EFLAGS(pEFlags);
9263 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9264 IEM_MC_ADVANCE_RIP();
9265 IEM_MC_END();
9266 return VINF_SUCCESS;
9267
9268 case IEMMODE_64BIT:
9269 IEM_MC_BEGIN(3, 0);
9270 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9271 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9272 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9273 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9274 IEM_MC_REF_EFLAGS(pEFlags);
9275 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9276 IEM_MC_ADVANCE_RIP();
9277 IEM_MC_END();
9278 return VINF_SUCCESS;
9279
9280 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9281 }
9282 }
9283 else
9284 {
9285 /* memory */
9286 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9287 switch (pIemCpu->enmEffOpSize)
9288 {
9289 case IEMMODE_16BIT:
9290 IEM_MC_BEGIN(3, 2);
9291 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9292 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9293 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9294 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9295
9296 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9297 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9298 IEM_MC_FETCH_EFLAGS(EFlags);
9299 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9300
9301 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
9302 IEM_MC_COMMIT_EFLAGS(EFlags);
9303 IEM_MC_ADVANCE_RIP();
9304 IEM_MC_END();
9305 return VINF_SUCCESS;
9306
9307 case IEMMODE_32BIT:
9308 IEM_MC_BEGIN(3, 2);
9309 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9310 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9311 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9312 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9313
9314 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9315 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9316 IEM_MC_FETCH_EFLAGS(EFlags);
9317 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9318
9319 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
9320 IEM_MC_COMMIT_EFLAGS(EFlags);
9321 IEM_MC_ADVANCE_RIP();
9322 IEM_MC_END();
9323 return VINF_SUCCESS;
9324
9325 case IEMMODE_64BIT:
9326 IEM_MC_BEGIN(3, 2);
9327 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9328 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9329 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9330 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9331
9332 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9333 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9334 IEM_MC_FETCH_EFLAGS(EFlags);
9335 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9336
9337 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
9338 IEM_MC_COMMIT_EFLAGS(EFlags);
9339 IEM_MC_ADVANCE_RIP();
9340 IEM_MC_END();
9341 return VINF_SUCCESS;
9342
9343 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9344 }
9345 }
9346}
9347
9348
9349/** Opcode 0xd2. */
9350FNIEMOP_DEF(iemOp_Grp2_Eb_CL)
9351{
9352 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
9353 PCIEMOPSHIFTSIZES pImpl;
9354 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9355 {
9356 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,CL"); break;
9357 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,CL"); break;
9358 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,CL"); break;
9359 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,CL"); break;
9360 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,CL"); break;
9361 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,CL"); break;
9362 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,CL"); break;
9363 case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
9364 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc, grr. */
9365 }
9366 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9367
9368 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9369 {
9370 /* register */
9371 IEMOP_HLP_NO_LOCK_PREFIX();
9372 IEM_MC_BEGIN(3, 0);
9373 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9374 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9375 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9376 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9377 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9378 IEM_MC_REF_EFLAGS(pEFlags);
9379 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9380 IEM_MC_ADVANCE_RIP();
9381 IEM_MC_END();
9382 }
9383 else
9384 {
9385 /* memory */
9386 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9387 IEM_MC_BEGIN(3, 2);
9388 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9389 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9390 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9391 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9392
9393 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9394 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9395 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9396 IEM_MC_FETCH_EFLAGS(EFlags);
9397 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9398
9399 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
9400 IEM_MC_COMMIT_EFLAGS(EFlags);
9401 IEM_MC_ADVANCE_RIP();
9402 IEM_MC_END();
9403 }
9404 return VINF_SUCCESS;
9405}
9406
9407
/** Opcode 0xd3 - Group 2: rol/ror/rcl/rcr/shl/shr/sar Ev,CL.
 *
 * Word/dword/qword shift/rotate with the count taken from CL.  The /reg
 * field of the ModR/M byte selects the operation; /6 is an undefined
 * encoding and raises an invalid-opcode exception.
 */
FNIEMOP_DEF(iemOp_Grp2_Ev_CL)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,CL"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,CL"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,CL"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,CL"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,CL"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,CL"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,CL"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE(); /* /6 is an undefined encoding. */
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
    }
    /* OF and AF are left undefined by the hardware for these operations. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        IEMOP_HLP_NO_LOCK_PREFIX();
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
9542
/** Opcode 0xd4 - aam Ib.  Not yet implemented (stub asserts). */
FNIEMOP_STUB(iemOp_aam_Ib);
/** Opcode 0xd5 - aad Ib.  Not yet implemented (stub asserts). */
FNIEMOP_STUB(iemOp_aad_Ib);
9547
9548
/** Opcode 0xd7 - xlat.  Loads AL from [rBX + unsigned AL] using the
 *  effective segment, with the address width selected by the current
 *  effective address mode.
 *
 *  NOTE(review): IEM_MC_BEGIN(2, 0) is used while each branch declares two
 *  IEM_MC_LOCALs and no args - verify the BEGIN arg/local counts. */
FNIEMOP_DEF(iemOp_xlat)
{
    IEMOP_MNEMONIC("xlat");
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t,  u8Tmp);
            IEM_MC_LOCAL(uint16_t, u16Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U16(u16Addr, X86_GREG_xAX); /* zero-extend AL into the address */
            IEM_MC_ADD_GREG_U16_TO_LOCAL(u16Addr, X86_GREG_xBX);
            IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, u16Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t,  u8Tmp);
            IEM_MC_LOCAL(uint32_t, u32Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U32(u32Addr, X86_GREG_xAX);
            IEM_MC_ADD_GREG_U32_TO_LOCAL(u32Addr, X86_GREG_xBX);
            IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, u32Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t,  u8Tmp);
            IEM_MC_LOCAL(uint64_t, u64Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U64(u64Addr, X86_GREG_xAX);
            IEM_MC_ADD_GREG_U64_TO_LOCAL(u64Addr, X86_GREG_xBX);
            IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, u64Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
9595
9596
/** Opcode 0xd8 - FPU escape group 0.  Not yet implemented (stub asserts). */
FNIEMOP_STUB(iemOp_EscF0);
/** Opcode 0xd9 - FPU escape group 1.  Not yet implemented (stub asserts). */
FNIEMOP_STUB(iemOp_EscF1);
/** Opcode 0xda - FPU escape group 2.  Not yet implemented (stub asserts). */
FNIEMOP_STUB(iemOp_EscF2);


/** Opcode 0xdb /0 - fild m32i.  Not yet implemented (stub asserts). */
FNIEMOP_STUB_1(iemOp_fild_dw, uint8_t, bRm);
/** Opcode 0xdb /1 - fisttp m32i.  Not yet implemented (stub asserts). */
FNIEMOP_STUB_1(iemOp_fisttp_dw, uint8_t, bRm);
/** Opcode 0xdb /2 - fist m32i.  Not yet implemented (stub asserts). */
FNIEMOP_STUB_1(iemOp_fist_dw, uint8_t, bRm);
/** Opcode 0xdb /3 - fistp m32i.  Not yet implemented (stub asserts). */
FNIEMOP_STUB_1(iemOp_fistp_dw, uint8_t, bRm);
/** Opcode 0xdb /5 - fld m80r.  Not yet implemented (stub asserts). */
FNIEMOP_STUB_1(iemOp_fld_xr, uint8_t, bRm);
/** Opcode 0xdb /7 - fstp m80r.  Not yet implemented (stub asserts). */
FNIEMOP_STUB_1(iemOp_fstp_xr, uint8_t, bRm);
9617
9618
/** Opcode 0xdb 0xe0 - fneni.  8087-only interrupt enable; treated as a NOP
 *  (RIP advance only) on later FPUs, which is what is emulated here. */
FNIEMOP_DEF(iemOp_fneni)
{
    IEMOP_MNEMONIC("fneni (8087/ign)");
    IEM_MC_BEGIN(0,0);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9628
9629
/** Opcode 0xdb 0xe1 - fndisi.  8087-only interrupt disable; treated as a NOP
 *  (RIP advance only) on later FPUs, which is what is emulated here. */
FNIEMOP_DEF(iemOp_fndisi)
{
    IEMOP_MNEMONIC("fndisi (8087/ign)");
    IEM_MC_BEGIN(0,0);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9639
9640
/** Opcode 0xdb 0xe2 - fnclex.  Not yet implemented (stub asserts). */
FNIEMOP_STUB(iemOp_fnclex);
9643
9644
/** Opcode 0xdb 0xe3 - fninit.  Reinitializes the FPU without checking for
 *  pending exceptions first (the no-wait form). */
FNIEMOP_DEF(iemOp_fninit)
{
    IEMOP_MNEMONIC("fninit");
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_finit, false /*fCheckXcpts*/);
}
9651
9652
/** Opcode 0xdb 0xe4 - fnsetpm.  80287-only "set protected mode" on the FPU;
 *  ignored (RIP advance only) on later FPUs, which is what is emulated. */
FNIEMOP_DEF(iemOp_fnsetpm)
{
    IEMOP_MNEMONIC("fnsetpm (80287/ign)");   /* set protected mode on fpu. */
    IEM_MC_BEGIN(0,0);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9662
9663
/** Opcode 0xdb 0xe5 - frstpm.  80287XL-only "reset protected mode" (back to
 *  real mode); ignored (RIP advance only) on later FPUs, as emulated here. */
FNIEMOP_DEF(iemOp_frstpm)
{
    IEMOP_MNEMONIC("frstpm (80287XL/ign)"); /* reset pm, back to real mode. */
    IEM_MC_BEGIN(0,0);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9673
9674
/** Opcode 0xdb - FPU escape group 3.
 *
 * Register forms (mod==3) dispatch on bits 3-5 of the ModR/M byte; the 0xe0
 * row further dispatches on the full byte for the fneni/fndisi/fnclex/
 * fninit/fnsetpm/frstpm control instructions.  Memory forms dispatch on the
 * /reg field for the 32-bit integer and 80-bit real load/store group.
 * Several register forms (fcmovxx, fucomi, fcomi) are not implemented yet
 * and assert.
 */
FNIEMOP_DEF(iemOp_EscF3)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (bRm & 0xf8)
        {
            case 0xc0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcmovnb
            case 0xc8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcmovne
            case 0xd0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcmovnbe
            case 0xd8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcmovnu
            case 0xe0:
                IEMOP_HLP_NO_LOCK_PREFIX();
                switch (bRm)
                {
                    case 0xe0: return FNIEMOP_CALL(iemOp_fneni);
                    case 0xe1: return FNIEMOP_CALL(iemOp_fndisi);
                    case 0xe2: return FNIEMOP_CALL(iemOp_fnclex);
                    case 0xe3: return FNIEMOP_CALL(iemOp_fninit);
                    case 0xe4: return FNIEMOP_CALL(iemOp_fnsetpm);
                    case 0xe5: return FNIEMOP_CALL(iemOp_frstpm);
                    default: return IEMOP_RAISE_INVALID_OPCODE();
                }
                break; /* unreachable - every inner case returns */
            case 0xe8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fucomi
            case 0xf0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcomi
            case 0xf8: return IEMOP_RAISE_INVALID_OPCODE();
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fild_dw,  bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fisttp_dw,bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fist_dw,  bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fistp_dw, bRm);
            case 4: return IEMOP_RAISE_INVALID_OPCODE();
            case 5: return FNIEMOP_CALL_1(iemOp_fld_xr,   bRm);
            case 6: return IEMOP_RAISE_INVALID_OPCODE();
            case 7: return FNIEMOP_CALL_1(iemOp_fstp_xr,  bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
9722
/** Opcode 0xdc - FPU escape group 4.  Not yet implemented (stub asserts). */
FNIEMOP_STUB(iemOp_EscF4);
/** Opcode 0xdd - FPU escape group 5.  Not yet implemented (stub asserts). */
FNIEMOP_STUB(iemOp_EscF5);

/** Opcode 0xde 0xd9 - fcompp.  Not yet implemented (stub asserts). */
FNIEMOP_STUB(iemOp_fcompp);
9730
9731/** Opcode 0xde. */
9732FNIEMOP_DEF(iemOp_EscF6)
9733{
9734 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
9735 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9736 {
9737 switch (bRm & 0xf8)
9738 {
9739 case 0xc0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fiaddp
9740 case 0xc8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fimulp
9741 case 0xd0: return IEMOP_RAISE_INVALID_OPCODE();
9742 case 0xd8:
9743 switch (bRm)
9744 {
9745 case 0xd9: return FNIEMOP_CALL(iemOp_fcompp);
9746 default: return IEMOP_RAISE_INVALID_OPCODE();
9747 }
9748 case 0xe0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fsubrp
9749 case 0xe8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fsubp
9750 case 0xf0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fdivrp
9751 case 0xf8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fdivp
9752 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9753 }
9754 }
9755 else
9756 {
9757#if 0
9758 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9759 {
9760 case 0: return FNIEMOP_CALL_1(iemOp_fiadd_w, bRm);
9761 case 1: return FNIEMOP_CALL_1(iemOp_fimul_w, bRm);
9762 case 2: return FNIEMOP_CALL_1(iemOp_ficom_w, bRm);
9763 case 3: return FNIEMOP_CALL_1(iemOp_ficomp_w, bRm);
9764 case 4: return FNIEMOP_CALL_1(iemOp_fisub_w, bRm);
9765 case 5: return FNIEMOP_CALL_1(iemOp_fisubr_w, bRm);
9766 case 6: return FNIEMOP_CALL_1(iemOp_fidiv_w, bRm);
9767 case 7: return FNIEMOP_CALL_1(iemOp_fidivr_w, bRm);
9768 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9769 }
9770#endif
9771 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
9772 }
9773}
9774
9775
/** Opcode 0xdf 0xe0 - fnstsw ax.  Copies the FPU status word into AX after
 *  raising \#NM if the FPU is unavailable (CR0.EM/TS). */
FNIEMOP_DEF(iemOp_fnstsw_ax)
{
    IEMOP_MNEMONIC("fnstsw ax");
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Tmp);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_FETCH_FSW(u16Tmp);
    IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9791
9792
/** Opcode 0xdf - FPU escape group 7.
 *
 * Register forms (mod==3) dispatch on bits 3-5 of the ModR/M byte; only
 * 0xe0 (fnstsw ax) is implemented, fucomip/fcomip assert and the remaining
 * rows raise \#UD.  All memory forms are still unimplemented and assert.
 */
FNIEMOP_DEF(iemOp_EscF7)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (bRm & 0xf8)
        {
            case 0xc0: return IEMOP_RAISE_INVALID_OPCODE();
            case 0xc8: return IEMOP_RAISE_INVALID_OPCODE();
            case 0xd0: return IEMOP_RAISE_INVALID_OPCODE();
            case 0xd8: return IEMOP_RAISE_INVALID_OPCODE();
            case 0xe0:
                switch (bRm)
                {
                    case 0xe0: return FNIEMOP_CALL(iemOp_fnstsw_ax);
                    default:   return IEMOP_RAISE_INVALID_OPCODE();
                }
            case 0xe8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fucomip
            case 0xf0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcomip
            case 0xf8: return IEMOP_RAISE_INVALID_OPCODE();
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        AssertFailedReturn(VERR_NOT_IMPLEMENTED);
    }
}
9822
9823
/** Opcode 0xe0 - loopne Jb.  Decrements rCX (width per effective address
 *  mode) and takes the relative jump while the counter is non-zero AND
 *  EFLAGS.ZF is clear. */
FNIEMOP_DEF(iemOp_loopne_Jb)
{
    IEMOP_MNEMONIC("loopne Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
            IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
            IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
            IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
9870
9871
/** Opcode 0xe1 - loope Jb.  Decrements rCX (width per effective address
 *  mode) and takes the relative jump while the counter is non-zero AND
 *  EFLAGS.ZF is set. */
FNIEMOP_DEF(iemOp_loope_Jb)
{
    IEMOP_MNEMONIC("loope Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
            IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
            IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
            IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
9918
9919
/** Opcode 0xe2 - loop Jb.  Decrements rCX (width per effective address
 *  mode) and takes the relative jump while the counter is non-zero;
 *  EFLAGS are not consulted. */
FNIEMOP_DEF(iemOp_loop_Jb)
{
    IEMOP_MNEMONIC("loop Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    /** @todo Check out the #GP case if EIP < CS.Base or EIP > CS.Limit when
     * using the 32-bit operand size override.  How can that be restarted?  See
     * weird pseudo code in intel manual. */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
            IEM_MC_IF_CX_IS_NZ() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
            IEM_MC_IF_ECX_IS_NZ() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
            IEM_MC_IF_RCX_IS_NZ() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
9969
9970
/** Opcode 0xe3 - jcxz/jecxz/jrcxz Jb.  Takes the relative jump when the
 *  counter register (width per effective address mode) is zero; rCX is not
 *  modified.  Note the inverted branch sense: the non-zero case advances. */
FNIEMOP_DEF(iemOp_jecxz_Jb)
{
    IEMOP_MNEMONIC("jecxz Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_IF_CX_IS_NZ() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ELSE() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_IF_ECX_IS_NZ() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ELSE() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_IF_RCX_IS_NZ() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ELSE() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10014
10015
10016/** Opcode 0xe4 */
10017FNIEMOP_DEF(iemOp_in_AL_Ib)
10018{
10019 IEMOP_MNEMONIC("in eAX,Ib");
10020 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm);
10021 IEMOP_HLP_NO_LOCK_PREFIX();
10022 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, 1);
10023}
10024
10025
/** Opcode 0xe5 - in eAX,Ib.  Word/dword port input (per effective operand
 *  size) from an immediate port number via the iemCImpl_in worker. */
FNIEMOP_DEF(iemOp_in_eAX_Ib)
{
    IEMOP_MNEMONIC("in eAX,Ib");
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
10034
10035
/** Opcode 0xe6 - out Ib,AL.  Byte-sized port output to an immediate port
 *  number via the iemCImpl_out worker. */
FNIEMOP_DEF(iemOp_out_Ib_AL)
{
    IEMOP_MNEMONIC("out Ib,AL");
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, 1);
}
10044
10045
/** Opcode 0xe7 - out Ib,eAX.  Word/dword port output (per effective operand
 *  size) to an immediate port number via the iemCImpl_out worker. */
FNIEMOP_DEF(iemOp_out_Ib_eAX)
{
    IEMOP_MNEMONIC("out Ib,eAX");
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
10054
10055
/** Opcode 0xe8 - call Jv.  Near relative call; the immediate width follows
 *  the effective operand size (64-bit mode fetches a sign-extended dword),
 *  and the push/branch work is deferred to the size-specific CIMPL worker. */
FNIEMOP_DEF(iemOp_call_Jv)
{
    IEMOP_MNEMONIC("call Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_16, (int32_t)u16Imm);
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_32, (int32_t)u32Imm);
        }

        case IEMMODE_64BIT:
        {
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(pIemCpu, &u64Imm);
            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_64, u64Imm);
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10084
10085
/** Opcode 0xe9 - jmp Jv.  Near relative jump; 64-bit mode shares the 32-bit
 *  path since the displacement is a sign-extended dword either way. */
FNIEMOP_DEF(iemOp_jmp_Jv)
{
    IEMOP_MNEMONIC("jmp Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
            IEM_MC_BEGIN(0, 0);
            IEM_MC_REL_JMP_S16((int16_t)u16Imm);
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:   /* same displacement handling as 32-bit */
        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
            IEM_MC_BEGIN(0, 0);
            IEM_MC_REL_JMP_S32((int32_t)u32Imm);
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10115
10116
/** Opcode 0xea - jmp Ap.  Direct far jump with an immediate sel:offset
 *  pointer; invalid (\#UD) in 64-bit mode.  The segment loading and mode
 *  checks are handled by the iemCImpl_FarJmp worker. */
FNIEMOP_DEF(iemOp_jmp_Ap)
{
    IEMOP_MNEMONIC("jmp Ap");
    IEMOP_HLP_NO_64BIT();

    /* Decode the far pointer address and pass it on to the far call C implementation. */
    uint32_t offSeg;
    if (pIemCpu->enmEffOpSize != IEMMODE_16BIT)
        IEM_OPCODE_GET_NEXT_U32(pIemCpu, &offSeg);
    else
    {
        uint16_t offSeg16; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &offSeg16);
        offSeg = offSeg16;
    }
    uint16_t uSel;  IEM_OPCODE_GET_NEXT_U16(pIemCpu, &uSel);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_FarJmp, uSel, offSeg, pIemCpu->enmEffOpSize);
}
10136
10137
/** Opcode 0xeb - jmp Jb.  Short relative jump with a signed byte
 *  displacement. */
FNIEMOP_DEF(iemOp_jmp_Jb)
{
    IEMOP_MNEMONIC("jmp Jb");
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_REL_JMP_S8((int8_t)u8Imm);
    IEM_MC_END();
    return VINF_SUCCESS;
}
10151
10152
/** Opcode 0xec - in AL,DX.  Byte-sized port input from the port in DX via
 *  the iemCImpl_in_eAX_DX worker. */
FNIEMOP_DEF(iemOp_in_AL_DX)
{
    IEMOP_MNEMONIC("in  AL,DX");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, 1);
}
10160
10161
/** Opcode 0xed - in eAX,DX.  Word/dword port input (per effective operand
 *  size) from the port in DX via the iemCImpl_in_eAX_DX worker.
 *  NOTE(review): the function name lacks the "in_" prefix used by its
 *  siblings (iemOp_in_AL_DX etc.); renaming would require touching the
 *  opcode dispatch table. */
FNIEMOP_DEF(iemOp_eAX_DX)
{
    IEMOP_MNEMONIC("in  eAX,DX");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
10169
10170
/** Opcode 0xee - out DX,AL.  Byte-sized port output to the port in DX via
 *  the iemCImpl_out_DX_eAX worker. */
FNIEMOP_DEF(iemOp_out_DX_AL)
{
    IEMOP_MNEMONIC("out DX,AL");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, 1);
}
10178
10179
/** Opcode 0xef - out DX,eAX.  Word/dword port output (per effective operand
 *  size) to the port in DX via the iemCImpl_out_DX_eAX worker. */
FNIEMOP_DEF(iemOp_out_DX_eAX)
{
    IEMOP_MNEMONIC("out DX,eAX");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
10187
10188
/** Opcode 0xf0 - lock prefix.  Records the prefix in fPrefixes and restarts
 *  decoding with the following opcode byte; legality is checked by the
 *  instruction decoders themselves (IEMOP_HLP_NO_LOCK_PREFIX etc.). */
FNIEMOP_DEF(iemOp_lock)
{
    pIemCpu->fPrefixes |= IEM_OP_PRF_LOCK;

    uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
10197
10198
/** Opcode 0xf2 - repne/repnz prefix.  Records the prefix (clearing any
 *  earlier REPE) and restarts decoding with the following opcode byte. */
FNIEMOP_DEF(iemOp_repne)
{
    /* This overrides any previous REPE prefix. */
    pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPZ;
    pIemCpu->fPrefixes |= IEM_OP_PRF_REPNZ;

    uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
10209
10210
/** Opcode 0xf3. */
FNIEMOP_DEF(iemOp_repe)
{
    /* This overrides any previous REPNE prefix (the two are mutually exclusive). */
    pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPNZ;
    pIemCpu->fPrefixes |= IEM_OP_PRF_REPZ;

    /* Decode and dispatch the following opcode byte. */
    uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
10221
10222
/** Opcode 0xf4. */
FNIEMOP_DEF(iemOp_hlt)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Halt requires privilege/VM checks; defer to the C implementation. */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
}
10229
10230
/** Opcode 0xf5. */
FNIEMOP_STUB(iemOp_cmc); /* CMC not implemented yet — stub handler. */
10233
10234
/**
 * Common implementation of 'inc/dec/not/neg Eb'.
 *
 * Decodes the mod R/M destination (register or memory) and invokes the
 * byte-sized worker from @a pImpl, honouring the LOCK prefix for the
 * memory form.
 *
 * @param bRm The RM byte.
 * @param pImpl The instruction implementation.
 */
FNIEMOP_DEF_2(iemOpCommonUnaryEb, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
{
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        IEM_MC_BEGIN(2, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint32_t *, pEFlags, 1);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory access. */
        IEM_MC_BEGIN(2, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        /* Map the destination read-write and pick the locked worker when LOCK is present. */
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU8, pu8Dst, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
10278
10279
/**
 * Common implementation of 'inc/dec/not/neg Ev'.
 *
 * Register destinations are forwarded to iemOpCommonUnaryGReg; memory
 * destinations are handled here for all three effective operand sizes,
 * honouring the LOCK prefix.
 *
 * @param bRm The RM byte.
 * @param pImpl The instruction implementation.
 */
FNIEMOP_DEF_2(iemOpCommonUnaryEv, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
{
    /* Registers are handled by a common worker. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, pImpl, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);

    /* Memory we do here. */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(2, 2);
            IEM_MC_ARG(uint16_t *, pu16Dst, 0);
            IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
            IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
            IEM_MC_FETCH_EFLAGS(EFlags);
            /* Pick the locked worker when a LOCK prefix is present. */
            if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
            else
                IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU16, pu16Dst, pEFlags);

            IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
            IEM_MC_COMMIT_EFLAGS(EFlags);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(2, 2);
            IEM_MC_ARG(uint32_t *, pu32Dst, 0);
            IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
            IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
            IEM_MC_FETCH_EFLAGS(EFlags);
            if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
            else
                IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU32, pu32Dst, pEFlags);

            IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
            IEM_MC_COMMIT_EFLAGS(EFlags);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(2, 2);
            IEM_MC_ARG(uint64_t *, pu64Dst, 0);
            IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
            IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
            IEM_MC_FETCH_EFLAGS(EFlags);
            if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
            else
                IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU64, pu64Dst, pEFlags);

            IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
            IEM_MC_COMMIT_EFLAGS(EFlags);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10358
10359
/** Opcode 0xf6 /0. */
FNIEMOP_DEF_1(iemOp_grp3_test_Eb, uint8_t, bRm)
{
    IEMOP_MNEMONIC("test Eb,Ib");
    /* TEST leaves AF undefined; tell the verifier to ignore it. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_CONST(uint8_t, u8Src,/*=*/u8Imm, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory access. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t, u8Src, 1);
        IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        /* The immediate follows the mod R/M displacement, so calc the address first. */
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm);
        IEM_MC_ASSIGN(u8Src, u8Imm);
        /* TEST only reads the destination, hence the read-only mapping. */
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_R);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
10407
10408
/** Opcode 0xf7 /0. */
FNIEMOP_DEF_1(iemOp_grp3_test_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("test Ev,Iv");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    /* TEST leaves AF undefined; tell the verifier to ignore it. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/u16Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/u32Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                /* 64-bit form takes a sign-extended 32-bit immediate. */
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(pIemCpu, &u64Imm);
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/u64Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory access. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* The immediate follows the mod R/M displacement, so calc the address first. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16Imm);
                IEM_MC_ASSIGN(u16Src, u16Imm);
                /* TEST only reads the destination, hence the read-only mapping. */
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_R);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Imm);
                IEM_MC_ASSIGN(u32Src, u32Imm);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_R);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                /* 64-bit form takes a sign-extended 32-bit immediate. */
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(pIemCpu, &u64Imm);
                IEM_MC_ASSIGN(u64Src, u64Imm);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_R);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
10544
10545
/** Opcode 0xf6 /4, /5, /6 and /7.
 * Common worker for the byte-sized mul/imul/div/idiv forms; the result
 * (and for division, the remainder) goes into AX via pu16AX. */
FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEb, uint8_t, bRm, PFNIEMAIMPLMULDIVU8, pfnU8)
{
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        IEMOP_HLP_NO_LOCK_PREFIX(); /* NOTE(review): redundant — already checked above. */
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint16_t *, pu16AX, 0);
        IEM_MC_ARG(uint8_t, u8Value, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pfnU8, pu16AX, u8Value, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory access. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

        IEM_MC_BEGIN(3, 1);
        IEM_MC_ARG(uint16_t *, pu16AX, 0);
        IEM_MC_ARG(uint8_t, u8Value, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pfnU8, pu16AX, u8Value, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
10588
10589
10590/** Opcode 0xf7 /4, /5, /6 and /7. */
10591FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEv, uint8_t, bRm, PCIEMOPMULDIVSIZES, pImpl)
10592{
10593 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10594 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
10595
10596 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10597 {
10598 /* register access */
10599 switch (pIemCpu->enmEffOpSize)
10600 {
10601 case IEMMODE_16BIT:
10602 {
10603 IEMOP_HLP_NO_LOCK_PREFIX();
10604 IEM_MC_BEGIN(3, 1);
10605 IEM_MC_ARG(uint16_t *, pu16AX, 0);
10606 IEM_MC_ARG(uint16_t *, pu16DX, 1);
10607 IEM_MC_ARG(uint16_t, u16Value, 2);
10608 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10609 IEM_MC_LOCAL(int32_t, rc);
10610
10611 IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10612 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
10613 IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
10614 IEM_MC_REF_EFLAGS(pEFlags);
10615 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
10616 IEM_MC_IF_LOCAL_IS_Z(rc) {
10617 IEM_MC_ADVANCE_RIP();
10618 } IEM_MC_ELSE() {
10619 IEM_MC_RAISE_DIVIDE_ERROR();
10620 } IEM_MC_ENDIF();
10621
10622 IEM_MC_END();
10623 return VINF_SUCCESS;
10624 }
10625
10626 case IEMMODE_32BIT:
10627 {
10628 IEMOP_HLP_NO_LOCK_PREFIX();
10629 IEM_MC_BEGIN(3, 1);
10630 IEM_MC_ARG(uint32_t *, pu32AX, 0);
10631 IEM_MC_ARG(uint32_t *, pu32DX, 1);
10632 IEM_MC_ARG(uint32_t, u32Value, 2);
10633 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10634 IEM_MC_LOCAL(int32_t, rc);
10635
10636 IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10637 IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
10638 IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
10639 IEM_MC_REF_EFLAGS(pEFlags);
10640 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
10641 IEM_MC_IF_LOCAL_IS_Z(rc) {
10642 IEM_MC_ADVANCE_RIP();
10643 } IEM_MC_ELSE() {
10644 IEM_MC_RAISE_DIVIDE_ERROR();
10645 } IEM_MC_ENDIF();
10646
10647 IEM_MC_END();
10648 return VINF_SUCCESS;
10649 }
10650
10651 case IEMMODE_64BIT:
10652 {
10653 IEMOP_HLP_NO_LOCK_PREFIX();
10654 IEM_MC_BEGIN(3, 1);
10655 IEM_MC_ARG(uint64_t *, pu64AX, 0);
10656 IEM_MC_ARG(uint64_t *, pu64DX, 1);
10657 IEM_MC_ARG(uint64_t, u64Value, 2);
10658 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10659 IEM_MC_LOCAL(int32_t, rc);
10660
10661 IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10662 IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
10663 IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
10664 IEM_MC_REF_EFLAGS(pEFlags);
10665 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
10666 IEM_MC_IF_LOCAL_IS_Z(rc) {
10667 IEM_MC_ADVANCE_RIP();
10668 } IEM_MC_ELSE() {
10669 IEM_MC_RAISE_DIVIDE_ERROR();
10670 } IEM_MC_ENDIF();
10671
10672 IEM_MC_END();
10673 return VINF_SUCCESS;
10674 }
10675
10676 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10677 }
10678 }
10679 else
10680 {
10681 /* memory access. */
10682 switch (pIemCpu->enmEffOpSize)
10683 {
10684 case IEMMODE_16BIT:
10685 {
10686 IEMOP_HLP_NO_LOCK_PREFIX();
10687 IEM_MC_BEGIN(3, 2);
10688 IEM_MC_ARG(uint16_t *, pu16AX, 0);
10689 IEM_MC_ARG(uint16_t *, pu16DX, 1);
10690 IEM_MC_ARG(uint16_t, u16Value, 2);
10691 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10692 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10693 IEM_MC_LOCAL(int32_t, rc);
10694
10695 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10696 IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
10697 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
10698 IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
10699 IEM_MC_REF_EFLAGS(pEFlags);
10700 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
10701 IEM_MC_IF_LOCAL_IS_Z(rc) {
10702 IEM_MC_ADVANCE_RIP();
10703 } IEM_MC_ELSE() {
10704 IEM_MC_RAISE_DIVIDE_ERROR();
10705 } IEM_MC_ENDIF();
10706
10707 IEM_MC_END();
10708 return VINF_SUCCESS;
10709 }
10710
10711 case IEMMODE_32BIT:
10712 {
10713 IEMOP_HLP_NO_LOCK_PREFIX();
10714 IEM_MC_BEGIN(3, 2);
10715 IEM_MC_ARG(uint32_t *, pu32AX, 0);
10716 IEM_MC_ARG(uint32_t *, pu32DX, 1);
10717 IEM_MC_ARG(uint32_t, u32Value, 2);
10718 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10719 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10720 IEM_MC_LOCAL(int32_t, rc);
10721
10722 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10723 IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
10724 IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
10725 IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
10726 IEM_MC_REF_EFLAGS(pEFlags);
10727 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
10728 IEM_MC_IF_LOCAL_IS_Z(rc) {
10729 IEM_MC_ADVANCE_RIP();
10730 } IEM_MC_ELSE() {
10731 IEM_MC_RAISE_DIVIDE_ERROR();
10732 } IEM_MC_ENDIF();
10733
10734 IEM_MC_END();
10735 return VINF_SUCCESS;
10736 }
10737
10738 case IEMMODE_64BIT:
10739 {
10740 IEMOP_HLP_NO_LOCK_PREFIX();
10741 IEM_MC_BEGIN(3, 2);
10742 IEM_MC_ARG(uint64_t *, pu64AX, 0);
10743 IEM_MC_ARG(uint64_t *, pu64DX, 1);
10744 IEM_MC_ARG(uint64_t, u64Value, 2);
10745 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10746 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10747 IEM_MC_LOCAL(int32_t, rc);
10748
10749 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10750 IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
10751 IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
10752 IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
10753 IEM_MC_REF_EFLAGS(pEFlags);
10754 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
10755 IEM_MC_IF_LOCAL_IS_Z(rc) {
10756 IEM_MC_ADVANCE_RIP();
10757 } IEM_MC_ELSE() {
10758 IEM_MC_RAISE_DIVIDE_ERROR();
10759 } IEM_MC_ENDIF();
10760
10761 IEM_MC_END();
10762 return VINF_SUCCESS;
10763 }
10764
10765 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10766 }
10767 }
10768}
10769
/** Opcode 0xf6.
 * Group 3 byte forms, dispatched on the mod R/M reg field. */
FNIEMOP_DEF(iemOp_Grp3_Eb)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            return FNIEMOP_CALL_1(iemOp_grp3_test_Eb, bRm);
        case 1:
            /* /1 is undefined; raised as an invalid-opcode class error. */
            return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        case 2:
            IEMOP_MNEMONIC("not Eb");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_not);
        case 3:
            IEMOP_MNEMONIC("neg Eb");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_neg);
        case 4:
            IEMOP_MNEMONIC("mul Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, &iemAImpl_mul_u8);
        case 5:
            IEMOP_MNEMONIC("imul Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, &iemAImpl_imul_u8);
        case 6:
            IEMOP_MNEMONIC("div Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, &iemAImpl_div_u8);
        case 7:
            IEMOP_MNEMONIC("idiv Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, &iemAImpl_idiv_u8);
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10805
10806
/** Opcode 0xf7.
 * Group 3 word/dword/qword forms, dispatched on the mod R/M reg field. */
FNIEMOP_DEF(iemOp_Grp3_Ev)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            return FNIEMOP_CALL_1(iemOp_grp3_test_Ev, bRm);
        case 1:
            /* /1 is undefined; raised as an invalid-opcode class error. */
            return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        case 2:
            IEMOP_MNEMONIC("not Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_not);
        case 3:
            IEMOP_MNEMONIC("neg Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_neg);
        case 4:
            IEMOP_MNEMONIC("mul Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_mul);
        case 5:
            IEMOP_MNEMONIC("imul Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_imul);
        case 6:
            IEMOP_MNEMONIC("div Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_div);
        case 7:
            IEMOP_MNEMONIC("idiv Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_idiv);
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10842
10843
/** Opcode 0xf8. Clears the carry flag. */
FNIEMOP_DEF(iemOp_clc)
{
    IEMOP_MNEMONIC("clc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_CLEAR_EFL_BIT(X86_EFL_CF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10855
10856
10857/** Opcode 0xf9. */
10858FNIEMOP_DEF(iemOp_stc)
10859{
10860 IEMOP_MNEMONIC("slc");
10861 IEMOP_HLP_NO_LOCK_PREFIX();
10862 IEM_MC_BEGIN(0, 0);
10863 IEM_MC_SET_EFL_BIT(X86_EFL_CF);
10864 IEM_MC_ADVANCE_RIP();
10865 IEM_MC_END();
10866 return VINF_SUCCESS;
10867}
10868
10869
/** Opcode 0xfa. */
FNIEMOP_DEF(iemOp_cli)
{
    IEMOP_MNEMONIC("cli");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* IF handling needs IOPL/VME checks; defer to the C implementation. */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cli);
}
10877
10878
/** Opcode 0xfb. */
FNIEMOP_DEF(iemOp_sti)
{
    IEMOP_MNEMONIC("sti");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* IF handling needs IOPL/VME and interrupt-shadow logic; defer to the C implementation. */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sti);
}
10885
10886
/** Opcode 0xfc. Clears the direction flag. */
FNIEMOP_DEF(iemOp_cld)
{
    IEMOP_MNEMONIC("cld");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_CLEAR_EFL_BIT(X86_EFL_DF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10898
10899
/** Opcode 0xfd. Sets the direction flag. */
FNIEMOP_DEF(iemOp_std)
{
    IEMOP_MNEMONIC("std");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_SET_EFL_BIT(X86_EFL_DF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10911
10912
10913/** Opcode 0xfe. */
10914FNIEMOP_DEF(iemOp_Grp4)
10915{
10916 uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
10917 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
10918 {
10919 case 0:
10920 IEMOP_MNEMONIC("inc Ev");
10921 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_inc);
10922 case 1:
10923 IEMOP_MNEMONIC("dec Ev");
10924 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_dec);
10925 default:
10926 IEMOP_MNEMONIC("grp4-ud");
10927 return IEMOP_RAISE_INVALID_OPCODE();
10928 }
10929}
10930
10931
/**
 * Opcode 0xff /2 - near indirect call.
 *
 * Fetches the target RIP from a register or memory and defers the stack
 * push + branch to the iemCImpl_call_* C implementations.
 *
 * @param bRm The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_calln_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("calln Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* near calls default to 64-bit operand size in long mode */

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* The new RIP is taken from a register. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(1, 0);
                IEM_MC_ARG(uint16_t, u16Target, 0);
                IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(1, 0);
                IEM_MC_ARG(uint32_t, u32Target, 0);
                IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(1, 0);
                IEM_MC_ARG(uint64_t, u64Target, 0);
                IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* The new RIP is taken from memory. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(1, 1);
                IEM_MC_ARG(uint16_t, u16Target, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
                IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(1, 1);
                IEM_MC_ARG(uint32_t, u32Target, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
                IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(1, 1);
                IEM_MC_ARG(uint64_t, u64Target, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
                IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
11013
11014
11015/**
11016 * Opcode 0xff /3.
11017 * @param bRm The RM byte.
11018 */
11019FNIEMOP_DEF_1(iemOp_Grp5_callf_Ep, uint8_t, bRm)
11020{
11021 IEMOP_MNEMONIC("callf Ep");
11022 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
11023
11024 /* Registers? How?? */
11025 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11026 {
11027 /** @todo How the heck does a 'callf eax' work? Probably just have to
11028 * search the docs... */
11029 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
11030 }
11031
11032 /* Far pointer loaded from memory. */
11033 switch (pIemCpu->enmEffOpSize)
11034 {
11035 case IEMMODE_16BIT:
11036 IEM_MC_BEGIN(3, 1);
11037 IEM_MC_ARG(uint16_t, u16Sel, 0);
11038 IEM_MC_ARG(uint16_t, offSeg, 1);
11039 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
11040 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11041 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11042 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11043 IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc + 2);
11044 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
11045 IEM_MC_END();
11046 return VINF_SUCCESS;
11047
11048 case IEMMODE_32BIT:
11049 IEM_MC_BEGIN(3, 1);
11050 IEM_MC_ARG(uint16_t, u16Sel, 0);
11051 IEM_MC_ARG(uint32_t, offSeg, 1);
11052 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_32BIT, 2);
11053 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11054 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11055 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11056 IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc + 4);
11057 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
11058 IEM_MC_END();
11059 return VINF_SUCCESS;
11060
11061 case IEMMODE_64BIT:
11062 IEM_MC_BEGIN(3, 1);
11063 IEM_MC_ARG(uint16_t, u16Sel, 0);
11064 IEM_MC_ARG(uint64_t, offSeg, 1);
11065 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
11066 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11067 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11068 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11069 IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc + 8);
11070 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
11071 IEM_MC_END();
11072 return VINF_SUCCESS;
11073
11074 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11075 }
11076}
11077
11078
11079/**
11080 * Opcode 0xff /4.
11081 * @param bRm The RM byte.
11082 */
11083FNIEMOP_DEF_1(iemOp_Grp5_jmpn_Ev, uint8_t, bRm)
11084{
11085 IEMOP_MNEMONIC("callf Ep");
11086 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
11087 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
11088
11089 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11090 {
11091 /* The new RIP is taken from a register. */
11092 switch (pIemCpu->enmEffOpSize)
11093 {
11094 case IEMMODE_16BIT:
11095 IEM_MC_BEGIN(0, 1);
11096 IEM_MC_LOCAL(uint16_t, u16Target);
11097 IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11098 IEM_MC_SET_RIP_U16(u16Target);
11099 IEM_MC_END()
11100 return VINF_SUCCESS;
11101
11102 case IEMMODE_32BIT:
11103 IEM_MC_BEGIN(0, 1);
11104 IEM_MC_LOCAL(uint32_t, u32Target);
11105 IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11106 IEM_MC_SET_RIP_U32(u32Target);
11107 IEM_MC_END()
11108 return VINF_SUCCESS;
11109
11110 case IEMMODE_64BIT:
11111 IEM_MC_BEGIN(0, 1);
11112 IEM_MC_LOCAL(uint64_t, u64Target);
11113 IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11114 IEM_MC_SET_RIP_U64(u64Target);
11115 IEM_MC_END()
11116 return VINF_SUCCESS;
11117
11118 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11119 }
11120 }
11121 else
11122 {
11123 /* The new RIP is taken from a register. */
11124 switch (pIemCpu->enmEffOpSize)
11125 {
11126 case IEMMODE_16BIT:
11127 IEM_MC_BEGIN(0, 2);
11128 IEM_MC_LOCAL(uint16_t, u16Target);
11129 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11130 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11131 IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11132 IEM_MC_SET_RIP_U16(u16Target);
11133 IEM_MC_END()
11134 return VINF_SUCCESS;
11135
11136 case IEMMODE_32BIT:
11137 IEM_MC_BEGIN(0, 2);
11138 IEM_MC_LOCAL(uint32_t, u32Target);
11139 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11140 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11141 IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11142 IEM_MC_SET_RIP_U32(u32Target);
11143 IEM_MC_END()
11144 return VINF_SUCCESS;
11145
11146 case IEMMODE_64BIT:
11147 IEM_MC_BEGIN(0, 2);
11148 IEM_MC_LOCAL(uint32_t, u32Target);
11149 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11150 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11151 IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11152 IEM_MC_SET_RIP_U32(u32Target);
11153 IEM_MC_END()
11154 return VINF_SUCCESS;
11155
11156 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11157 }
11158 }
11159}
11160
11161
11162/**
11163 * Opcode 0xff /5.
11164 * @param bRm The RM byte.
11165 */
11166FNIEMOP_DEF_1(iemOp_Grp5_jmpf_Ep, uint8_t, bRm)
11167{
11168 IEMOP_MNEMONIC("jmp Ap");
11169 IEMOP_HLP_NO_64BIT();
11170 /** @todo could share all the decoding with iemOp_Grp5_callf_Ep. */
11171
11172 /* Decode the far pointer address and pass it on to the far call C
11173 implementation. */
11174 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11175 {
11176 /** @todo How the heck does a 'callf eax' work? Probably just have to
11177 * search the docs... */
11178 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
11179 }
11180
11181 /* Far pointer loaded from memory. */
11182 switch (pIemCpu->enmEffOpSize)
11183 {
11184 case IEMMODE_16BIT:
11185 IEM_MC_BEGIN(3, 1);
11186 IEM_MC_ARG(uint16_t, u16Sel, 0);
11187 IEM_MC_ARG(uint16_t, offSeg, 1);
11188 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
11189 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11190 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11191 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11192 IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc + 2);
11193 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
11194 IEM_MC_END();
11195 return VINF_SUCCESS;
11196
11197 case IEMMODE_32BIT:
11198 IEM_MC_BEGIN(3, 1);
11199 IEM_MC_ARG(uint16_t, u16Sel, 0);
11200 IEM_MC_ARG(uint32_t, offSeg, 1);
11201 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_32BIT, 2);
11202 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11203 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11204 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11205 IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc + 4);
11206 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
11207 IEM_MC_END();
11208 return VINF_SUCCESS;
11209
11210 case IEMMODE_64BIT:
11211 IEM_MC_BEGIN(3, 1);
11212 IEM_MC_ARG(uint16_t, u16Sel, 0);
11213 IEM_MC_ARG(uint64_t, offSeg, 1);
11214 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
11215 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11216 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11217 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11218 IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc + 8);
11219 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
11220 IEM_MC_END();
11221 return VINF_SUCCESS;
11222
11223 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11224 }
11225}
11226
11227
/**
 * Opcode 0xff /6 - push a word/dword/qword operand (register or memory)
 * onto the stack.
 *
 * @param   bRm     The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_push_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("push Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */

    /* Registers are handled by a common worker. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return FNIEMOP_CALL_1(iemOpCommonPushGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);

    /* Memory we do here. */
    /* NOTE(review): presumably makes the operand size default to 64-bit in
       long mode, per the usual push semantics -- confirm against the macro. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            /* Fetch the 16-bit value from memory and push it. */
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint16_t, u16Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
            IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U16(u16Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            /* Fetch the 32-bit value from memory and push it. */
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
            IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U32(u32Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            /* Fetch the 64-bit value from memory and push it. */
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
            IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U64(u64Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
    }
    /* All IEMMODE values are handled above; this quiets the compiler. */
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}
11280
11281
/** Opcode 0xff. */
FNIEMOP_DEF(iemOp_Grp5)
{
    /* Group 5: the /reg field of the ModR/M byte selects the instruction. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            IEMOP_MNEMONIC("inc Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_inc);
        case 1:
            IEMOP_MNEMONIC("dec Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_dec);
        case 2: /* call near, indirect */
            return FNIEMOP_CALL_1(iemOp_Grp5_calln_Ev, bRm);
        case 3: /* call far, indirect */
            return FNIEMOP_CALL_1(iemOp_Grp5_callf_Ep, bRm);
        case 4: /* jmp near, indirect */
            return FNIEMOP_CALL_1(iemOp_Grp5_jmpn_Ev, bRm);
        case 5: /* jmp far, indirect */
            return FNIEMOP_CALL_1(iemOp_Grp5_jmpf_Ep, bRm);
        case 6:
            return FNIEMOP_CALL_1(iemOp_Grp5_push_Ev, bRm);
        case 7: /* /7 is undefined for group 5 -> #UD */
            IEMOP_MNEMONIC("grp5-ud");
            return IEMOP_RAISE_INVALID_OPCODE();
    }
    /* The 3-bit /reg field cannot exceed 7; unreachable. */
    AssertFailedReturn(VERR_INTERNAL_ERROR_2);
}
11310
11311
11312
/**
 * One-byte opcode dispatch table, indexed directly by the opcode byte.
 * Each entry decodes and emulates the corresponding instruction; the
 * hex comments give the opcode of the first entry on each row.
 */
const PFNIEMOP g_apfnOneByteMap[256] =
{
    /* 0x00 */  iemOp_add_Eb_Gb,        iemOp_add_Ev_Gv,        iemOp_add_Gb_Eb,        iemOp_add_Gv_Ev,
    /* 0x04 */  iemOp_add_Al_Ib,        iemOp_add_eAX_Iz,       iemOp_push_ES,          iemOp_pop_ES,
    /* 0x08 */  iemOp_or_Eb_Gb,         iemOp_or_Ev_Gv,         iemOp_or_Gb_Eb,         iemOp_or_Gv_Ev,
    /* 0x0c */  iemOp_or_Al_Ib,         iemOp_or_eAX_Iz,        iemOp_push_CS,          iemOp_2byteEscape,
    /* 0x10 */  iemOp_adc_Eb_Gb,        iemOp_adc_Ev_Gv,        iemOp_adc_Gb_Eb,        iemOp_adc_Gv_Ev,
    /* 0x14 */  iemOp_adc_Al_Ib,        iemOp_adc_eAX_Iz,       iemOp_push_SS,          iemOp_pop_SS,
    /* 0x18 */  iemOp_sbb_Eb_Gb,        iemOp_sbb_Ev_Gv,        iemOp_sbb_Gb_Eb,        iemOp_sbb_Gv_Ev,
    /* 0x1c */  iemOp_sbb_Al_Ib,        iemOp_sbb_eAX_Iz,       iemOp_push_DS,          iemOp_pop_DS,
    /* 0x20 */  iemOp_and_Eb_Gb,        iemOp_and_Ev_Gv,        iemOp_and_Gb_Eb,        iemOp_and_Gv_Ev,
    /* 0x24 */  iemOp_and_Al_Ib,        iemOp_and_eAX_Iz,       iemOp_seg_ES,           iemOp_daa,
    /* 0x28 */  iemOp_sub_Eb_Gb,        iemOp_sub_Ev_Gv,        iemOp_sub_Gb_Eb,        iemOp_sub_Gv_Ev,
    /* 0x2c */  iemOp_sub_Al_Ib,        iemOp_sub_eAX_Iz,       iemOp_seg_CS,           iemOp_das,
    /* 0x30 */  iemOp_xor_Eb_Gb,        iemOp_xor_Ev_Gv,        iemOp_xor_Gb_Eb,        iemOp_xor_Gv_Ev,
    /* 0x34 */  iemOp_xor_Al_Ib,        iemOp_xor_eAX_Iz,       iemOp_seg_SS,           iemOp_aaa,
    /* 0x38 */  iemOp_cmp_Eb_Gb,        iemOp_cmp_Ev_Gv,        iemOp_cmp_Gb_Eb,        iemOp_cmp_Gv_Ev,
    /* 0x3c */  iemOp_cmp_Al_Ib,        iemOp_cmp_eAX_Iz,       iemOp_seg_DS,           iemOp_aas,
    /* 0x40 */  iemOp_inc_eAX,          iemOp_inc_eCX,          iemOp_inc_eDX,          iemOp_inc_eBX,
    /* 0x44 */  iemOp_inc_eSP,          iemOp_inc_eBP,          iemOp_inc_eSI,          iemOp_inc_eDI,
    /* 0x48 */  iemOp_dec_eAX,          iemOp_dec_eCX,          iemOp_dec_eDX,          iemOp_dec_eBX,
    /* 0x4c */  iemOp_dec_eSP,          iemOp_dec_eBP,          iemOp_dec_eSI,          iemOp_dec_eDI,
    /* 0x50 */  iemOp_push_eAX,         iemOp_push_eCX,         iemOp_push_eDX,         iemOp_push_eBX,
    /* 0x54 */  iemOp_push_eSP,         iemOp_push_eBP,         iemOp_push_eSI,         iemOp_push_eDI,
    /* 0x58 */  iemOp_pop_eAX,          iemOp_pop_eCX,          iemOp_pop_eDX,          iemOp_pop_eBX,
    /* 0x5c */  iemOp_pop_eSP,          iemOp_pop_eBP,          iemOp_pop_eSI,          iemOp_pop_eDI,
    /* 0x60 */  iemOp_pusha,            iemOp_popa,             iemOp_bound_Gv_Ma,      iemOp_arpl_Ew_Gw,
    /* 0x64 */  iemOp_seg_FS,           iemOp_seg_GS,           iemOp_op_size,          iemOp_addr_size,
    /* 0x68 */  iemOp_push_Iz,          iemOp_imul_Gv_Ev_Iz,    iemOp_push_Ib,          iemOp_imul_Gv_Ev_Ib,
    /* 0x6c */  iemOp_insb_Yb_DX,       iemOp_inswd_Yv_DX,      iemOp_outsb_Yb_DX,      iemOp_outswd_Yv_DX,
    /* 0x70 */  iemOp_jo_Jb,            iemOp_jno_Jb,           iemOp_jc_Jb,            iemOp_jnc_Jb,
    /* 0x74 */  iemOp_je_Jb,            iemOp_jne_Jb,           iemOp_jbe_Jb,           iemOp_jnbe_Jb,
    /* 0x78 */  iemOp_js_Jb,            iemOp_jns_Jb,           iemOp_jp_Jb,            iemOp_jnp_Jb,
    /* 0x7c */  iemOp_jl_Jb,            iemOp_jnl_Jb,           iemOp_jle_Jb,           iemOp_jnle_Jb,
    /* 0x80 */  iemOp_Grp1_Eb_Ib_80,    iemOp_Grp1_Ev_Iz,       iemOp_Grp1_Eb_Ib_82,    iemOp_Grp1_Ev_Ib,
    /* 0x84 */  iemOp_test_Eb_Gb,       iemOp_test_Ev_Gv,       iemOp_xchg_Eb_Gb,       iemOp_xchg_Ev_Gv,
    /* 0x88 */  iemOp_mov_Eb_Gb,        iemOp_mov_Ev_Gv,        iemOp_mov_Gb_Eb,        iemOp_mov_Gv_Ev,
    /* 0x8c */  iemOp_mov_Ev_Sw,        iemOp_lea_Gv_M,         iemOp_mov_Sw_Ev,        iemOp_pop_Ev,
    /* 0x90 */  iemOp_nop,              iemOp_xchg_eCX_eAX,     iemOp_xchg_eDX_eAX,     iemOp_xchg_eBX_eAX,
    /* 0x94 */  iemOp_xchg_eSP_eAX,     iemOp_xchg_eBP_eAX,     iemOp_xchg_eSI_eAX,     iemOp_xchg_eDI_eAX,
    /* 0x98 */  iemOp_cbw,              iemOp_cwd,              iemOp_call_Ap,          iemOp_wait,
    /* 0x9c */  iemOp_pushf_Fv,         iemOp_popf_Fv,          iemOp_sahf,             iemOp_lahf,
    /* 0xa0 */  iemOp_mov_Al_Ob,        iemOp_mov_rAX_Ov,       iemOp_mov_Ob_AL,        iemOp_mov_Ov_rAX,
    /* 0xa4 */  iemOp_movsb_Xb_Yb,      iemOp_movswd_Xv_Yv,     iemOp_cmpsb_Xb_Yb,      iemOp_cmpswd_Xv_Yv,
    /* 0xa8 */  iemOp_test_AL_Ib,       iemOp_test_eAX_Iz,      iemOp_stosb_Yb_AL,      iemOp_stoswd_Yv_eAX,
    /* 0xac */  iemOp_lodsb_AL_Xb,      iemOp_lodswd_eAX_Xv,    iemOp_scasb_AL_Xb,      iemOp_scaswd_eAX_Xv,
    /* 0xb0 */  iemOp_mov_AL_Ib,        iemOp_CL_Ib,            iemOp_DL_Ib,            iemOp_BL_Ib,
    /* 0xb4 */  iemOp_mov_AH_Ib,        iemOp_CH_Ib,            iemOp_DH_Ib,            iemOp_BH_Ib,
    /* 0xb8 */  iemOp_eAX_Iv,           iemOp_eCX_Iv,           iemOp_eDX_Iv,           iemOp_eBX_Iv,
    /* 0xbc */  iemOp_eSP_Iv,           iemOp_eBP_Iv,           iemOp_eSI_Iv,           iemOp_eDI_Iv,
    /* 0xc0 */  iemOp_Grp2_Eb_Ib,       iemOp_Grp2_Ev_Ib,       iemOp_retn_Iw,          iemOp_retn,
    /* 0xc4 */  iemOp_les_Gv_Mp,        iemOp_lds_Gv_Mp,        iemOp_Grp11_Eb_Ib,      iemOp_Grp11_Ev_Iz,
    /* 0xc8 */  iemOp_enter_Iw_Ib,      iemOp_leave,            iemOp_retf_Iw,          iemOp_retf,
    /* 0xcc */  iemOp_int_3,            iemOp_int_Ib,           iemOp_into,             iemOp_iret,
    /* 0xd0 */  iemOp_Grp2_Eb_1,        iemOp_Grp2_Ev_1,        iemOp_Grp2_Eb_CL,       iemOp_Grp2_Ev_CL,
    /* 0xd4 */  iemOp_aam_Ib,           iemOp_aad_Ib,           iemOp_Invalid,          iemOp_xlat,
    /* 0xd8 */  iemOp_EscF0,            iemOp_EscF1,            iemOp_EscF2,            iemOp_EscF3,
    /* 0xdc */  iemOp_EscF4,            iemOp_EscF5,            iemOp_EscF6,            iemOp_EscF7,
    /* 0xe0 */  iemOp_loopne_Jb,        iemOp_loope_Jb,         iemOp_loop_Jb,          iemOp_jecxz_Jb,
    /* 0xe4 */  iemOp_in_AL_Ib,         iemOp_in_eAX_Ib,        iemOp_out_Ib_AL,        iemOp_out_Ib_eAX,
    /* 0xe8 */  iemOp_call_Jv,          iemOp_jmp_Jv,           iemOp_jmp_Ap,           iemOp_jmp_Jb,
    /* 0xec */  iemOp_in_AL_DX,         iemOp_eAX_DX,           iemOp_out_DX_AL,        iemOp_out_DX_eAX,
    /* 0xf0 */  iemOp_lock,             iemOp_Invalid,          iemOp_repne,            iemOp_repe,
    /* 0xf4 */  iemOp_hlt,              iemOp_cmc,              iemOp_Grp3_Eb,          iemOp_Grp3_Ev,
    /* 0xf8 */  iemOp_clc,              iemOp_stc,              iemOp_cli,              iemOp_sti,
    /* 0xfc */  iemOp_cld,              iemOp_std,              iemOp_Grp4,             iemOp_Grp5,
};
11380
11381
11382/** @} */
11383
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette