VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h@ 47626

Last change on this file since 47626 was 47568, checked in by vboxsync, 11 years ago

IEM: LAR,LSL,ARPL, and some tracing (RTTraceBuf*).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 584.2 KB
Line 
1/* $Id: IEMAllInstructions.cpp.h 47568 2013-08-07 03:11:58Z vboxsync $ */
2/** @file
3 * IEM - Instruction Decoding and Emulation.
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Global Variables *
21*******************************************************************************/
22extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
23
24
/**
 * Common worker for instructions like ADD, AND, OR, ++ with a byte
 * memory/register as the destination.
 *
 * Decodes the ModR/M byte and dispatches to a register-destination or a
 * memory-destination microcode block; the actual arithmetic is performed by
 * the assembly worker selected from @a pImpl.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_r8, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX(); /* A LOCK prefix is not valid with a register destination. */

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         * Note! We're putting the eflags on the stack here so we can commit them
         *       after the memory.
         */
        /* pfnLockedU8 is NULL for instructions that never write the destination
           (CMP, TEST), so map the memory read-only in that case. */
        uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R; /* CMP,TEST */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,  pu8Dst,           0);
        IEM_MC_ARG(uint8_t,    u8Src,            1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_EFLAGS(EFlags);
        /* Use the locked worker when a LOCK prefix is present (memory only). */
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
85
86
87/**
88 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with
89 * memory/register as the destination.
90 *
91 * @param pImpl Pointer to the instruction implementation (assembly).
92 */
93FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl)
94{
95 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
96
97 /*
98 * If rm is denoting a register, no more instruction bytes.
99 */
100 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
101 {
102 IEMOP_HLP_NO_LOCK_PREFIX();
103
104 switch (pIemCpu->enmEffOpSize)
105 {
106 case IEMMODE_16BIT:
107 IEM_MC_BEGIN(3, 0);
108 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
109 IEM_MC_ARG(uint16_t, u16Src, 1);
110 IEM_MC_ARG(uint32_t *, pEFlags, 2);
111
112 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
113 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
114 IEM_MC_REF_EFLAGS(pEFlags);
115 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
116
117 IEM_MC_ADVANCE_RIP();
118 IEM_MC_END();
119 break;
120
121 case IEMMODE_32BIT:
122 IEM_MC_BEGIN(3, 0);
123 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
124 IEM_MC_ARG(uint32_t, u32Src, 1);
125 IEM_MC_ARG(uint32_t *, pEFlags, 2);
126
127 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
128 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
129 IEM_MC_REF_EFLAGS(pEFlags);
130 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
131
132 if (pImpl != &g_iemAImpl_test)
133 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
134 IEM_MC_ADVANCE_RIP();
135 IEM_MC_END();
136 break;
137
138 case IEMMODE_64BIT:
139 IEM_MC_BEGIN(3, 0);
140 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
141 IEM_MC_ARG(uint64_t, u64Src, 1);
142 IEM_MC_ARG(uint32_t *, pEFlags, 2);
143
144 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
145 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
146 IEM_MC_REF_EFLAGS(pEFlags);
147 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
148
149 IEM_MC_ADVANCE_RIP();
150 IEM_MC_END();
151 break;
152 }
153 }
154 else
155 {
156 /*
157 * We're accessing memory.
158 * Note! We're putting the eflags on the stack here so we can commit them
159 * after the memory.
160 */
161 uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R /* CMP,TEST */;
162 switch (pIemCpu->enmEffOpSize)
163 {
164 case IEMMODE_16BIT:
165 IEM_MC_BEGIN(3, 2);
166 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
167 IEM_MC_ARG(uint16_t, u16Src, 1);
168 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
169 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
170
171 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
172 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
173 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
174 IEM_MC_FETCH_EFLAGS(EFlags);
175 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
176 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
177 else
178 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
179
180 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
181 IEM_MC_COMMIT_EFLAGS(EFlags);
182 IEM_MC_ADVANCE_RIP();
183 IEM_MC_END();
184 break;
185
186 case IEMMODE_32BIT:
187 IEM_MC_BEGIN(3, 2);
188 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
189 IEM_MC_ARG(uint32_t, u32Src, 1);
190 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
191 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
192
193 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
194 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
195 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
196 IEM_MC_FETCH_EFLAGS(EFlags);
197 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
198 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
199 else
200 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
201
202 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
203 IEM_MC_COMMIT_EFLAGS(EFlags);
204 IEM_MC_ADVANCE_RIP();
205 IEM_MC_END();
206 break;
207
208 case IEMMODE_64BIT:
209 IEM_MC_BEGIN(3, 2);
210 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
211 IEM_MC_ARG(uint64_t, u64Src, 1);
212 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
213 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
214
215 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
216 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
217 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
218 IEM_MC_FETCH_EFLAGS(EFlags);
219 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
220 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
221 else
222 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
223
224 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
225 IEM_MC_COMMIT_EFLAGS(EFlags);
226 IEM_MC_ADVANCE_RIP();
227 IEM_MC_END();
228 break;
229 }
230 }
231 return VINF_SUCCESS;
232}
233
234
/**
 * Common worker for byte instructions like ADD, AND, OR, ++ with a register as
 * the destination.
 *
 * The source is the byte described by ModR/M (register or memory); since the
 * destination is always a register, there is no locked variant here.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_r8_rm, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        /* Note the reversed roles compared with the rm_r8 worker: rm is the
           source, reg is the destination. */
        IEM_MC_FETCH_GREG_U8(u8Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         */
        IEM_MC_BEGIN(3, 1);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_MEM_U8(u8Src, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
286
287
288/**
289 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with a
290 * register as the destination.
291 *
292 * @param pImpl Pointer to the instruction implementation (assembly).
293 */
294FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl)
295{
296 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
297 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
298
299 /*
300 * If rm is denoting a register, no more instruction bytes.
301 */
302 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
303 {
304 switch (pIemCpu->enmEffOpSize)
305 {
306 case IEMMODE_16BIT:
307 IEM_MC_BEGIN(3, 0);
308 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
309 IEM_MC_ARG(uint16_t, u16Src, 1);
310 IEM_MC_ARG(uint32_t *, pEFlags, 2);
311
312 IEM_MC_FETCH_GREG_U16(u16Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
313 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
314 IEM_MC_REF_EFLAGS(pEFlags);
315 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
316
317 IEM_MC_ADVANCE_RIP();
318 IEM_MC_END();
319 break;
320
321 case IEMMODE_32BIT:
322 IEM_MC_BEGIN(3, 0);
323 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
324 IEM_MC_ARG(uint32_t, u32Src, 1);
325 IEM_MC_ARG(uint32_t *, pEFlags, 2);
326
327 IEM_MC_FETCH_GREG_U32(u32Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
328 IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
329 IEM_MC_REF_EFLAGS(pEFlags);
330 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
331
332 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
333 IEM_MC_ADVANCE_RIP();
334 IEM_MC_END();
335 break;
336
337 case IEMMODE_64BIT:
338 IEM_MC_BEGIN(3, 0);
339 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
340 IEM_MC_ARG(uint64_t, u64Src, 1);
341 IEM_MC_ARG(uint32_t *, pEFlags, 2);
342
343 IEM_MC_FETCH_GREG_U64(u64Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
344 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
345 IEM_MC_REF_EFLAGS(pEFlags);
346 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
347
348 IEM_MC_ADVANCE_RIP();
349 IEM_MC_END();
350 break;
351 }
352 }
353 else
354 {
355 /*
356 * We're accessing memory.
357 */
358 switch (pIemCpu->enmEffOpSize)
359 {
360 case IEMMODE_16BIT:
361 IEM_MC_BEGIN(3, 1);
362 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
363 IEM_MC_ARG(uint16_t, u16Src, 1);
364 IEM_MC_ARG(uint32_t *, pEFlags, 2);
365 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
366
367 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
368 IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffDst);
369 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
370 IEM_MC_REF_EFLAGS(pEFlags);
371 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
372
373 IEM_MC_ADVANCE_RIP();
374 IEM_MC_END();
375 break;
376
377 case IEMMODE_32BIT:
378 IEM_MC_BEGIN(3, 1);
379 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
380 IEM_MC_ARG(uint32_t, u32Src, 1);
381 IEM_MC_ARG(uint32_t *, pEFlags, 2);
382 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
383
384 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
385 IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffDst);
386 IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
387 IEM_MC_REF_EFLAGS(pEFlags);
388 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
389
390 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
391 IEM_MC_ADVANCE_RIP();
392 IEM_MC_END();
393 break;
394
395 case IEMMODE_64BIT:
396 IEM_MC_BEGIN(3, 1);
397 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
398 IEM_MC_ARG(uint64_t, u64Src, 1);
399 IEM_MC_ARG(uint32_t *, pEFlags, 2);
400 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
401
402 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
403 IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffDst);
404 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
405 IEM_MC_REF_EFLAGS(pEFlags);
406 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
407
408 IEM_MC_ADVANCE_RIP();
409 IEM_MC_END();
410 break;
411 }
412 }
413 return VINF_SUCCESS;
414}
415
416
/**
 * Common worker for instructions like ADD, AND, OR, ++ with working on AL with
 * a byte immediate.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_AL_Ib, PCIEMOPBINSIZES, pImpl)
{
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG(uint8_t *,       pu8Dst,             0);
    IEM_MC_ARG_CONST(uint8_t,   u8Src,/*=*/ u8Imm,  1);
    IEM_MC_ARG(uint32_t *,      pEFlags,            2);

    /* The destination is always AL. */
    IEM_MC_REF_GREG_U8(pu8Dst, X86_GREG_xAX);
    IEM_MC_REF_EFLAGS(pEFlags);
    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
441
442
/**
 * Common worker for instructions like ADD, AND, OR, ++ with working on
 * AX/EAX/RAX with a word/dword immediate.
 *
 * In 64-bit mode the immediate is a dword sign-extended to a qword, per the
 * Iz operand convention (see the S32_SX_U64 fetch below).
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rAX_Iz, PCIEMOPBINSIZES, pImpl)
{
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint16_t *,      pu16Dst,                0);
            IEM_MC_ARG_CONST(uint16_t,  u16Src,/*=*/ u16Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint32_t *,      pu32Dst,                0);
            IEM_MC_ARG_CONST(uint32_t,  u32Src,/*=*/ u32Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

            /* TEST does not write its destination, so don't zero the upper
               half of RAX in that case. */
            if (pImpl != &g_iemAImpl_test)
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint64_t *,      pu64Dst,                0);
            IEM_MC_ARG_CONST(uint64_t,  u64Src,/*=*/ u64Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U64(pu64Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
515
516
/** Opcodes 0xf1, 0xd6. Invalid/undefined encodings - raise \#UD. */
FNIEMOP_DEF(iemOp_Invalid)
{
    IEMOP_MNEMONIC("Invalid");
    return IEMOP_RAISE_INVALID_OPCODE();
}
523
524
525
526/** @name ..... opcodes.
527 *
528 * @{
529 */
530
531/** @} */
532
533
534/** @name Two byte opcodes (first byte 0x0f).
535 *
536 * @{
537 */
538
/** Opcode 0x0f 0x00 /0. SLDT - store the LDT selector to a register or
 *  memory.  Not valid in real or V8086 mode. */
FNIEMOP_DEF_1(iemOp_Grp6_sldt, uint8_t, bRm)
{
    IEMOP_MNEMONIC("sldt Rv/Mw");
    IEMOP_HLP_NO_REAL_OR_V86_MODE();

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: the store width follows the operand size. */
        IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Ldtr);
                IEM_MC_FETCH_LDTR_U16(u16Ldtr);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Ldtr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Ldtr);
                IEM_MC_FETCH_LDTR_U32(u32Ldtr);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Ldtr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Ldtr);
                IEM_MC_FETCH_LDTR_U64(u64Ldtr);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Ldtr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory destination: always a 16-bit store regardless of operand size. */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Ldtr);
        IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
        IEM_MC_FETCH_LDTR_U16(u16Ldtr);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Ldtr);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
594
595
/** Opcode 0x0f 0x00 /1. STR - store the task register selector to a register
 *  or memory.  Not valid in real or V8086 mode. */
FNIEMOP_DEF_1(iemOp_Grp6_str, uint8_t, bRm)
{
    IEMOP_MNEMONIC("str Rv/Mw");
    IEMOP_HLP_NO_REAL_OR_V86_MODE();

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: the store width follows the operand size. */
        IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Tr);
                IEM_MC_FETCH_TR_U16(u16Tr);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Tr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Tr);
                IEM_MC_FETCH_TR_U32(u32Tr);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tr);
                IEM_MC_FETCH_TR_U64(u64Tr);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory destination: always a 16-bit store regardless of operand size. */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Tr);
        IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
        IEM_MC_FETCH_TR_U16(u16Tr);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tr);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
651
652
/** Opcode 0x0f 0x00 /2. LLDT - load the LDT register from a 16-bit selector
 *  in a register or memory.  Not valid in real or V8086 mode; the heavy
 *  lifting (privilege checks, descriptor loading) is done by iemCImpl_lldt. */
FNIEMOP_DEF_1(iemOp_Grp6_lldt, uint8_t, bRm)
{
    IEMOP_MNEMONIC("lldt Ew");
    IEMOP_HLP_NO_REAL_OR_V86_MODE();

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_DECODED_NL_1(OP_LLDT, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS);
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
        IEMOP_HLP_DECODED_NL_1(OP_LLDT, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS);
        IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); /** @todo test order */
        IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
682
683
/** Opcode 0x0f 0x00 /3. LTR - load the task register from a 16-bit selector
 *  in a register or memory.  Not valid in real or V8086 mode; the heavy
 *  lifting is done by iemCImpl_ltr. */
FNIEMOP_DEF_1(iemOp_Grp6_ltr, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ltr Ew");
    IEMOP_HLP_NO_REAL_OR_V86_MODE();

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); /** @todo test order */
        IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
713
714
715/** Opcode 0x0f 0x00 /3. */
716FNIEMOP_DEF_2(iemOpCommonGrp6VerX, uint8_t, bRm, bool, fWrite)
717{
718 IEMOP_HLP_NO_REAL_OR_V86_MODE();
719
720 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
721 {
722 IEMOP_HLP_DECODED_NL_1(fWrite ? OP_VERW : OP_VERR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
723 IEM_MC_BEGIN(2, 0);
724 IEM_MC_ARG(uint16_t, u16Sel, 0);
725 IEM_MC_ARG_CONST(bool, fWriteArg, fWrite, 1);
726 IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
727 IEM_MC_CALL_CIMPL_2(iemCImpl_VerX, u16Sel, fWriteArg);
728 IEM_MC_END();
729 }
730 else
731 {
732 IEM_MC_BEGIN(2, 1);
733 IEM_MC_ARG(uint16_t, u16Sel, 0);
734 IEM_MC_ARG_CONST(bool, fWriteArg, fWrite, 1);
735 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
736 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
737 IEMOP_HLP_DECODED_NL_1(fWrite ? OP_VERW : OP_VERR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
738 IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
739 IEM_MC_CALL_CIMPL_2(iemCImpl_VerX, u16Sel, fWriteArg);
740 IEM_MC_END();
741 }
742 return VINF_SUCCESS;
743}
744
745
/** Opcode 0x0f 0x00 /4. VERR - verify a segment for reading. */
FNIEMOP_DEF_1(iemOp_Grp6_verr, uint8_t, bRm)
{
    IEMOP_MNEMONIC("verr Ew");
    return FNIEMOP_CALL_2(iemOpCommonGrp6VerX, bRm, false);
}
752
753
754/** Opcode 0x0f 0x00 /5. */
755FNIEMOP_DEF_1(iemOp_Grp6_verw, uint8_t, bRm)
756{
757 IEMOP_MNEMONIC("verr Ew");
758 return FNIEMOP_CALL_2(iemOpCommonGrp6VerX, bRm, true);
759}
760
761
/** Opcode 0x0f 0x00. Group 6 dispatcher - the instruction is selected by the
 *  reg field of the ModR/M byte (/0../7). */
FNIEMOP_DEF(iemOp_Grp6)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: return FNIEMOP_CALL_1(iemOp_Grp6_sldt, bRm);
        case 1: return FNIEMOP_CALL_1(iemOp_Grp6_str,  bRm);
        case 2: return FNIEMOP_CALL_1(iemOp_Grp6_lldt, bRm);
        case 3: return FNIEMOP_CALL_1(iemOp_Grp6_ltr,  bRm);
        case 4: return FNIEMOP_CALL_1(iemOp_Grp6_verr, bRm);
        case 5: return FNIEMOP_CALL_1(iemOp_Grp6_verw, bRm);
        /* /6 and /7 are undefined encodings. */
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        case 7: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }

}
780
781
/** Opcode 0x0f 0x01 /0. SGDT - store the GDT base/limit to memory; the actual
 *  store is done by iemCImpl_sgdt. */
FNIEMOP_DEF_1(iemOp_Grp7_sgdt, uint8_t, bRm)
{
    IEMOP_MNEMONIC("sgdt Ms");
    IEMOP_HLP_64BIT_OP_SIZE();
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t,   iEffSeg, /*=*/pIemCpu->iEffSeg,                 0);
    IEM_MC_ARG(RTGCPTR,         GCPtrEffSrc,                                    1);
    IEM_MC_ARG_CONST(IEMMODE,   enmEffOpSizeArg,/*=*/pIemCpu->enmEffOpSize,     2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_CALL_CIMPL_3(iemCImpl_sgdt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}
797
798
/** Opcode 0x0f 0x01 /0 (mod=3, rm=1). VMCALL - VT-x, not implemented yet;
 *  raises \#UD for now. */
FNIEMOP_DEF(iemOp_Grp7_vmcall)
{
    IEMOP_BITCH_ABOUT_STUB();
    return IEMOP_RAISE_INVALID_OPCODE();
}
805
806
/** Opcode 0x0f 0x01 /0 (mod=3, rm=2). VMLAUNCH - VT-x, not implemented yet;
 *  raises \#UD for now. */
FNIEMOP_DEF(iemOp_Grp7_vmlaunch)
{
    IEMOP_BITCH_ABOUT_STUB();
    return IEMOP_RAISE_INVALID_OPCODE();
}
813
814
/** Opcode 0x0f 0x01 /0 (mod=3, rm=3). VMRESUME - VT-x, not implemented yet;
 *  raises \#UD for now. */
FNIEMOP_DEF(iemOp_Grp7_vmresume)
{
    IEMOP_BITCH_ABOUT_STUB();
    return IEMOP_RAISE_INVALID_OPCODE();
}
821
822
/** Opcode 0x0f 0x01 /0 (mod=3, rm=4). VMXOFF - VT-x, not implemented yet;
 *  raises \#UD for now. */
FNIEMOP_DEF(iemOp_Grp7_vmxoff)
{
    IEMOP_BITCH_ABOUT_STUB();
    return IEMOP_RAISE_INVALID_OPCODE();
}
829
830
/** Opcode 0x0f 0x01 /1. SIDT - store the IDT base/limit to memory; the actual
 *  store is done by iemCImpl_sidt. */
FNIEMOP_DEF_1(iemOp_Grp7_sidt, uint8_t, bRm)
{
    IEMOP_MNEMONIC("sidt Ms");
    IEMOP_HLP_64BIT_OP_SIZE();
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t,   iEffSeg, /*=*/pIemCpu->iEffSeg,                 0);
    IEM_MC_ARG(RTGCPTR,         GCPtrEffSrc,                                    1);
    IEM_MC_ARG_CONST(IEMMODE,   enmEffOpSizeArg,/*=*/pIemCpu->enmEffOpSize,     2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_CALL_CIMPL_3(iemCImpl_sidt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}
846
847
/** Opcode 0x0f 0x01 /1 (mod=3, rm=0). MONITOR - deferred to iemCImpl_monitor,
 *  passing the effective segment for the address in rAX. */
FNIEMOP_DEF(iemOp_Grp7_monitor)
{
    IEMOP_MNEMONIC("monitor");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); /** @todo Verify that monitor is allergic to lock prefixes. */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_monitor, pIemCpu->iEffSeg);
}
855
856
/** Opcode 0x0f 0x01 /1 (mod=3, rm=1). MWAIT - deferred to iemCImpl_mwait. */
FNIEMOP_DEF(iemOp_Grp7_mwait)
{
    IEMOP_MNEMONIC("mwait"); /** @todo Verify that mwait is allergic to lock prefixes. */
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_mwait);
}
864
865
/** Opcode 0x0f 0x01 /2. LGDT - load the GDT base/limit from memory; the
 *  actual load and privilege checks are done by iemCImpl_lgdt. */
FNIEMOP_DEF_1(iemOp_Grp7_lgdt, uint8_t, bRm)
{
    IEMOP_MNEMONIC("lgdt");
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEMOP_HLP_64BIT_OP_SIZE();
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t,   iEffSeg, /*=*/pIemCpu->iEffSeg,                 0);
    IEM_MC_ARG(RTGCPTR,         GCPtrEffSrc,                                    1);
    IEM_MC_ARG_CONST(IEMMODE,   enmEffOpSizeArg,/*=*/pIemCpu->enmEffOpSize,     2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEM_MC_CALL_CIMPL_3(iemCImpl_lgdt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}
882
883
/** Opcode 0x0f 0x01 /2 (mod=3). XGETBV - not implemented; asserts in debug
 *  builds and raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_xgetbv)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}
890
891
/** Opcode 0x0f 0x01 /2 (mod=3). XSETBV - not implemented; asserts in debug
 *  builds and raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_xsetbv)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}
898
899
900/** Opcode 0x0f 0x01 /3. */
901FNIEMOP_DEF_1(iemOp_Grp7_lidt, uint8_t, bRm)
902{
903 IEMOP_HLP_NO_LOCK_PREFIX();
904
905 IEMMODE enmEffOpSize = pIemCpu->enmCpuMode == IEMMODE_64BIT
906 ? IEMMODE_64BIT
907 : pIemCpu->enmEffOpSize;
908 IEM_MC_BEGIN(3, 1);
909 IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0);
910 IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
911 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/enmEffOpSize, 2);
912 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
913 IEM_MC_CALL_CIMPL_3(iemCImpl_lidt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
914 IEM_MC_END();
915 return VINF_SUCCESS;
916}
917
918
/* AMD SVM instructions (0x0f 0x01 0xd8..0xdf) - all declared as \#UD stubs
   until implemented. */

/** Opcode 0x0f 0x01 0xd8. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmrun);

/** Opcode 0x0f 0x01 0xd9. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmmcall);

/** Opcode 0x0f 0x01 0xda. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmload);

/** Opcode 0x0f 0x01 0xdb. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmsave);

/** Opcode 0x0f 0x01 0xdc. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_stgi);

/** Opcode 0x0f 0x01 0xdd. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_clgi);

/** Opcode 0x0f 0x01 0xde. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_skinit);

/** Opcode 0x0f 0x01 0xdf. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_invlpga);
942
/** Opcode 0x0f 0x01 /4. SMSW - store the machine status word (CR0) to a
 *  register or memory. */
FNIEMOP_DEF_1(iemOp_Grp7_smsw, uint8_t, bRm)
{
    IEMOP_MNEMONIC("smsw");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: the store width follows the operand size. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Tmp);
                IEM_MC_FETCH_CR0_U16(u16Tmp);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Tmp);
                IEM_MC_FETCH_CR0_U32(u32Tmp);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_FETCH_CR0_U64(u64Tmp);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Ignore operand size here, memory refs are always 16-bit. */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Tmp);
        IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_CR0_U16(u16Tmp);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tmp);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
        return VINF_SUCCESS;
    }
}
996
997
/** Opcode 0x0f 0x01 /6. LMSW - load the machine status word into CR0; the
 *  actual load and checks are done by iemCImpl_lmsw. */
FNIEMOP_DEF_1(iemOp_Grp7_lmsw, uint8_t, bRm)
{
    /* The operand size is effectively ignored, all is 16-bit and only the
       lower 3-bits are used. */
    IEMOP_MNEMONIC("lmsw");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Tmp, 0);
        IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Tmp, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1025
1026
/** Opcode 0x0f 0x01 /7. */
FNIEMOP_DEF_1(iemOp_Grp7_invlpg, uint8_t, bRm)
{
    /* Only called for memory forms; the register forms (mod == 3) are routed
       to swapgs/rdtscp by the group 7 dispatcher.  No memory access is made
       here - only the effective address is needed for the TLB invalidation. */
    IEMOP_MNEMONIC("invlpg");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(1, 1);
    IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 0);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEM_MC_CALL_CIMPL_1(iemCImpl_invlpg, GCPtrEffDst);
    IEM_MC_END();
    return VINF_SUCCESS;
}
1039
1040
/** Opcode 0x0f 0x01 /7 (mod=3, rm=0). */
FNIEMOP_DEF(iemOp_Grp7_swapgs)
{
    IEMOP_MNEMONIC("swapgs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_ONLY_64BIT(); /* swapgs is only valid in 64-bit mode. */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_swapgs);
}
1049
1050
/** Opcode 0x0f 0x01 /7 (mod=3, rm=1).
 * @note Not implemented yet; falls back on the caller's re-execution/interpreter path. */
FNIEMOP_DEF(iemOp_Grp7_rdtscp)
{
    NOREF(pIemCpu);
    IEMOP_BITCH_ABOUT_STUB();
    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
}
1058
1059
/** Opcode 0x0f 0x01. */
FNIEMOP_DEF(iemOp_Grp7)
{
    /*
     * Group 7 dispatcher.  The modrm.reg field selects the instruction; for
     * several encodings the register form (mod == 3) dispatches further on
     * modrm.rm, while the memory form decodes a descriptor-table/MSW insn.
     */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: /* sgdt (mem) / Intel VMX instructions (reg). */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_sgdt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 1: return FNIEMOP_CALL(iemOp_Grp7_vmcall);
                case 2: return FNIEMOP_CALL(iemOp_Grp7_vmlaunch);
                case 3: return FNIEMOP_CALL(iemOp_Grp7_vmresume);
                case 4: return FNIEMOP_CALL(iemOp_Grp7_vmxoff);
            }
            return IEMOP_RAISE_INVALID_OPCODE(); /* rm = 0, 5..7 undefined. */

        case 1: /* sidt (mem) / monitor, mwait (reg). */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_sidt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_monitor);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_mwait);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 2: /* lgdt (mem) / xgetbv, xsetbv (reg). */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_lgdt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_xgetbv);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_xsetbv);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 3: /* lidt (mem) / AMD SVM instructions (reg, all 8 rm values). */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_lidt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_Amd_vmrun);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_Amd_vmmcall);
                case 2: return FNIEMOP_CALL(iemOp_Grp7_Amd_vmload);
                case 3: return FNIEMOP_CALL(iemOp_Grp7_Amd_vmsave);
                case 4: return FNIEMOP_CALL(iemOp_Grp7_Amd_stgi);
                case 5: return FNIEMOP_CALL(iemOp_Grp7_Amd_clgi);
                case 6: return FNIEMOP_CALL(iemOp_Grp7_Amd_skinit);
                case 7: return FNIEMOP_CALL(iemOp_Grp7_Amd_invlpga);
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }

        case 4: /* smsw - both register and memory forms. */
            return FNIEMOP_CALL_1(iemOp_Grp7_smsw, bRm);

        case 5:
            return IEMOP_RAISE_INVALID_OPCODE();

        case 6: /* lmsw - both register and memory forms. */
            return FNIEMOP_CALL_1(iemOp_Grp7_lmsw, bRm);

        case 7: /* invlpg (mem) / swapgs, rdtscp (reg). */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_invlpg, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_swapgs);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_rdtscp);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
1136
/**
 * Common worker for LAR (0x0f 0x02) and LSL (0x0f 0x03).
 *
 * @param   fIsLar  true for LAR, false for LSL; forwarded to the cImpl worker.
 * @note    Invalid in real and V8086 mode (segment descriptor instructions).
 */
FNIEMOP_DEF_1(iemOpCommonLarLsl_Gv_Ew, bool, fIsLar)
{
    IEMOP_HLP_NO_REAL_OR_V86_MODE();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register form: selector comes from the modrm.rm GPR (low 16 bits). */
        IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_REG, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Sel, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3);

                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u16, pu16Dst, u16Sel, pEFlags, fIsLarArg);

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            /* 32-bit shares the 64-bit path below; the u64 cImpl worker
               presumably narrows per the effective operand size - TODO confirm. */
            case IEMMODE_32BIT:
            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint16_t, u16Sel, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3);

                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u64, pu64Dst, u16Sel, pEFlags, fIsLarArg);

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory form: the 16-bit selector is fetched from the effective address. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Sel, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_MEM, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);

                IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u16, pu16Dst, u16Sel, pEFlags, fIsLarArg);

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint16_t, u16Sel, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_MEM, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);

                IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u64, pu64Dst, u16Sel, pEFlags, fIsLarArg);

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
1237
1238
1239
/** Opcode 0x0f 0x02 - lar Gv,Ew. */
FNIEMOP_DEF(iemOp_lar_Gv_Ew)
{
    IEMOP_MNEMONIC("lar Gv,Ew");
    return FNIEMOP_CALL_1(iemOpCommonLarLsl_Gv_Ew, true /*fIsLar*/);
}
1246
1247
/** Opcode 0x0f 0x03 - lsl Gv,Ew. */
FNIEMOP_DEF(iemOp_lsl_Gv_Ew)
{
    IEMOP_MNEMONIC("lsl Gv,Ew");
    return FNIEMOP_CALL_1(iemOpCommonLarLsl_Gv_Ew, false /*fIsLar*/);
}
1254
1255
/** Opcode 0x0f 0x05. */
FNIEMOP_DEF(iemOp_syscall)
{
    IEMOP_MNEMONIC("syscall");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_syscall);
}
1263
1264
/** Opcode 0x0f 0x06. */
FNIEMOP_DEF(iemOp_clts)
{
    IEMOP_MNEMONIC("clts");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clts);
}
1272
1273
/** Opcode 0x0f 0x07. */
FNIEMOP_DEF(iemOp_sysret)
{
    IEMOP_MNEMONIC("sysret");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sysret);
}
1281
1282
1283/** Opcode 0x0f 0x08. */
1284FNIEMOP_STUB(iemOp_invd);
1285
1286
/** Opcode 0x0f 0x09.
 * Emulated as a privileged NOP: only the CPL-0 check is performed; no cache
 * write-back/invalidation is actually carried out. */
FNIEMOP_DEF(iemOp_wbinvd)
{
    IEMOP_MNEMONIC("wbinvd");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS; /* ignore for now */
}
1298
1299
1300/** Opcode 0x0f 0x0b. */
1301FNIEMOP_STUB(iemOp_ud2);
1302
1303/** Opcode 0x0f 0x0d. */
1304FNIEMOP_DEF(iemOp_nop_Ev_GrpP)
1305{
1306 /* AMD prefetch group, Intel implements this as NOP Ev (and so do we). */
1307 if (!IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(X86_CPUID_EXT_FEATURE_EDX_LONG_MODE | X86_CPUID_AMD_FEATURE_EDX_3DNOW,
1308 X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF))
1309 {
1310 IEMOP_MNEMONIC("GrpP");
1311 return IEMOP_RAISE_INVALID_OPCODE();
1312 }
1313
1314 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
1315 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
1316 {
1317 IEMOP_MNEMONIC("GrpP");
1318 return IEMOP_RAISE_INVALID_OPCODE();
1319 }
1320
1321 IEMOP_HLP_NO_LOCK_PREFIX();
1322 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
1323 {
1324 case 2: /* Aliased to /0 for the time being. */
1325 case 4: /* Aliased to /0 for the time being. */
1326 case 5: /* Aliased to /0 for the time being. */
1327 case 6: /* Aliased to /0 for the time being. */
1328 case 7: /* Aliased to /0 for the time being. */
1329 case 0: IEMOP_MNEMONIC("prefetch"); break;
1330 case 1: IEMOP_MNEMONIC("prefetchw "); break;
1331 case 3: IEMOP_MNEMONIC("prefetchw"); break;
1332 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1333 }
1334
1335 IEM_MC_BEGIN(0, 1);
1336 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
1337 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
1338 /* Currently a NOP. */
1339 IEM_MC_ADVANCE_RIP();
1340 IEM_MC_END();
1341 return VINF_SUCCESS;
1342}
1343
1344
1345/** Opcode 0x0f 0x0e. */
1346FNIEMOP_STUB(iemOp_femms);
1347
1348
1349/** Opcode 0x0f 0x0f 0x0c. */
1350FNIEMOP_STUB(iemOp_3Dnow_pi2fw_Pq_Qq);
1351
1352/** Opcode 0x0f 0x0f 0x0d. */
1353FNIEMOP_STUB(iemOp_3Dnow_pi2fd_Pq_Qq);
1354
1355/** Opcode 0x0f 0x0f 0x1c. */
1356FNIEMOP_STUB(iemOp_3Dnow_pf2fw_Pq_Qq);
1357
1358/** Opcode 0x0f 0x0f 0x1d. */
1359FNIEMOP_STUB(iemOp_3Dnow_pf2fd_Pq_Qq);
1360
1361/** Opcode 0x0f 0x0f 0x8a. */
1362FNIEMOP_STUB(iemOp_3Dnow_pfnacc_Pq_Qq);
1363
1364/** Opcode 0x0f 0x0f 0x8e. */
1365FNIEMOP_STUB(iemOp_3Dnow_pfpnacc_Pq_Qq);
1366
1367/** Opcode 0x0f 0x0f 0x90. */
1368FNIEMOP_STUB(iemOp_3Dnow_pfcmpge_Pq_Qq);
1369
1370/** Opcode 0x0f 0x0f 0x94. */
1371FNIEMOP_STUB(iemOp_3Dnow_pfmin_Pq_Qq);
1372
1373/** Opcode 0x0f 0x0f 0x96. */
1374FNIEMOP_STUB(iemOp_3Dnow_pfrcp_Pq_Qq);
1375
1376/** Opcode 0x0f 0x0f 0x97. */
1377FNIEMOP_STUB(iemOp_3Dnow_pfrsqrt_Pq_Qq);
1378
1379/** Opcode 0x0f 0x0f 0x9a. */
1380FNIEMOP_STUB(iemOp_3Dnow_pfsub_Pq_Qq);
1381
1382/** Opcode 0x0f 0x0f 0x9e. */
1383FNIEMOP_STUB(iemOp_3Dnow_pfadd_PQ_Qq);
1384
1385/** Opcode 0x0f 0x0f 0xa0. */
1386FNIEMOP_STUB(iemOp_3Dnow_pfcmpgt_Pq_Qq);
1387
1388/** Opcode 0x0f 0x0f 0xa4. */
1389FNIEMOP_STUB(iemOp_3Dnow_pfmax_Pq_Qq);
1390
1391/** Opcode 0x0f 0x0f 0xa6. */
1392FNIEMOP_STUB(iemOp_3Dnow_pfrcpit1_Pq_Qq);
1393
1394/** Opcode 0x0f 0x0f 0xa7. */
1395FNIEMOP_STUB(iemOp_3Dnow_pfrsqit1_Pq_Qq);
1396
1397/** Opcode 0x0f 0x0f 0xaa. */
1398FNIEMOP_STUB(iemOp_3Dnow_pfsubr_Pq_Qq);
1399
1400/** Opcode 0x0f 0x0f 0xae. */
1401FNIEMOP_STUB(iemOp_3Dnow_pfacc_PQ_Qq);
1402
1403/** Opcode 0x0f 0x0f 0xb0. */
1404FNIEMOP_STUB(iemOp_3Dnow_pfcmpeq_Pq_Qq);
1405
1406/** Opcode 0x0f 0x0f 0xb4. */
1407FNIEMOP_STUB(iemOp_3Dnow_pfmul_Pq_Qq);
1408
1409/** Opcode 0x0f 0x0f 0xb6. */
1410FNIEMOP_STUB(iemOp_3Dnow_pfrcpit2_Pq_Qq);
1411
1412/** Opcode 0x0f 0x0f 0xb7. */
1413FNIEMOP_STUB(iemOp_3Dnow_pmulhrw_Pq_Qq);
1414
1415/** Opcode 0x0f 0x0f 0xbb. */
1416FNIEMOP_STUB(iemOp_3Dnow_pswapd_Pq_Qq);
1417
1418/** Opcode 0x0f 0x0f 0xbf. */
1419FNIEMOP_STUB(iemOp_3Dnow_pavgusb_PQ_Qq);
1420
1421
/** Opcode 0x0f 0x0f.
 * 3DNow! escape: the actual operation is selected by an immediate-style
 * suffix byte that follows the modrm/displacement bytes. */
FNIEMOP_DEF(iemOp_3Dnow)
{
    if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_AMD_FEATURE_EDX_3DNOW))
    {
        IEMOP_MNEMONIC("3Dnow");
        return IEMOP_RAISE_INVALID_OPCODE();
    }

    /* This is pretty sparse, use switch instead of table. */
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    switch (b)
    {
        case 0x0c: return FNIEMOP_CALL(iemOp_3Dnow_pi2fw_Pq_Qq);
        case 0x0d: return FNIEMOP_CALL(iemOp_3Dnow_pi2fd_Pq_Qq);
        case 0x1c: return FNIEMOP_CALL(iemOp_3Dnow_pf2fw_Pq_Qq);
        case 0x1d: return FNIEMOP_CALL(iemOp_3Dnow_pf2fd_Pq_Qq);
        case 0x8a: return FNIEMOP_CALL(iemOp_3Dnow_pfnacc_Pq_Qq);
        case 0x8e: return FNIEMOP_CALL(iemOp_3Dnow_pfpnacc_Pq_Qq);
        case 0x90: return FNIEMOP_CALL(iemOp_3Dnow_pfcmpge_Pq_Qq);
        case 0x94: return FNIEMOP_CALL(iemOp_3Dnow_pfmin_Pq_Qq);
        case 0x96: return FNIEMOP_CALL(iemOp_3Dnow_pfrcp_Pq_Qq);
        case 0x97: return FNIEMOP_CALL(iemOp_3Dnow_pfrsqrt_Pq_Qq);
        case 0x9a: return FNIEMOP_CALL(iemOp_3Dnow_pfsub_Pq_Qq);
        case 0x9e: return FNIEMOP_CALL(iemOp_3Dnow_pfadd_PQ_Qq);
        case 0xa0: return FNIEMOP_CALL(iemOp_3Dnow_pfcmpgt_Pq_Qq);
        case 0xa4: return FNIEMOP_CALL(iemOp_3Dnow_pfmax_Pq_Qq);
        case 0xa6: return FNIEMOP_CALL(iemOp_3Dnow_pfrcpit1_Pq_Qq);
        case 0xa7: return FNIEMOP_CALL(iemOp_3Dnow_pfrsqit1_Pq_Qq);
        case 0xaa: return FNIEMOP_CALL(iemOp_3Dnow_pfsubr_Pq_Qq);
        case 0xae: return FNIEMOP_CALL(iemOp_3Dnow_pfacc_PQ_Qq);
        case 0xb0: return FNIEMOP_CALL(iemOp_3Dnow_pfcmpeq_Pq_Qq);
        case 0xb4: return FNIEMOP_CALL(iemOp_3Dnow_pfmul_Pq_Qq);
        case 0xb6: return FNIEMOP_CALL(iemOp_3Dnow_pfrcpit2_Pq_Qq);
        case 0xb7: return FNIEMOP_CALL(iemOp_3Dnow_pmulhrw_Pq_Qq);
        case 0xbb: return FNIEMOP_CALL(iemOp_3Dnow_pswapd_Pq_Qq);
        case 0xbf: return FNIEMOP_CALL(iemOp_3Dnow_pavgusb_PQ_Qq);
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
1463
1464
1465/** Opcode 0x0f 0x10. */
1466FNIEMOP_STUB(iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd);
1467/** Opcode 0x0f 0x11. */
1468FNIEMOP_STUB(iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd);
1469/** Opcode 0x0f 0x12. */
1470FNIEMOP_STUB(iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq); //NEXT
1471/** Opcode 0x0f 0x13. */
1472FNIEMOP_STUB(iemOp_movlps_Mq_Vq__movlpd_Mq_Vq); //NEXT
1473/** Opcode 0x0f 0x14. */
1474FNIEMOP_STUB(iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq);
1475/** Opcode 0x0f 0x15. */
1476FNIEMOP_STUB(iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq);
1477/** Opcode 0x0f 0x16. */
1478FNIEMOP_STUB(iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq); //NEXT
1479/** Opcode 0x0f 0x17. */
1480FNIEMOP_STUB(iemOp_movhps_Mq_Vq__movhpd_Mq_Vq); //NEXT
1481
1482
/** Opcode 0x0f 0x18 - SSE prefetch hints (group 16).
 * Implemented as a NOP that still decodes the memory operand; the register
 * forms raise \#UD. */
FNIEMOP_DEF(iemOp_prefetch_Grp16)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 4: /* Aliased to /0 for the time being according to AMD. */
            case 5: /* Aliased to /0 for the time being according to AMD. */
            case 6: /* Aliased to /0 for the time being according to AMD. */
            case 7: /* Aliased to /0 for the time being according to AMD. */
            case 0: IEMOP_MNEMONIC("prefetchNTA m8"); break;
            case 1: IEMOP_MNEMONIC("prefetchT0  m8"); break;
            case 2: IEMOP_MNEMONIC("prefetchT1  m8"); break;
            case 3: IEMOP_MNEMONIC("prefetchT2  m8"); break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }

        /* Decode the effective address but make no actual memory access. */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
        /* Currently a NOP. */
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
        return VINF_SUCCESS;
    }

    return IEMOP_RAISE_INVALID_OPCODE();
}
1514
1515
/** Opcode 0x0f 0x19..0x1f - multi-byte NOP (NOP Ev).
 * The memory form still decodes (and thereby consumes) the modrm/SIB bytes
 * but performs no access. */
FNIEMOP_DEF(iemOp_nop_Ev)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(0, 0);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
        /* Currently a NOP. */
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1538
1539
/** Opcode 0x0f 0x20 - mov Rd,Cd. */
FNIEMOP_DEF(iemOp_mov_Rd_Cd)
{
    /* mod is ignored, as is operand size overrides. */
    IEMOP_MNEMONIC("mov Rd,Cd");
    /* Operand size is forced: 64-bit in long mode, 32-bit otherwise. */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
    else
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
    {
        /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
        if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
            return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */
        iCrReg |= 8;
    }
    /* Only CR0, CR2, CR3, CR4 and CR8 exist; anything else is #UD. */
    switch (iCrReg)
    {
        case 0: case 2: case 3: case 4: case 8:
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
    IEMOP_HLP_DONE_DECODING();

    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Cd, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB, iCrReg);
}
1570
1571
/** Opcode 0x0f 0x21 - mov Rd,Dd (read debug register). */
FNIEMOP_DEF(iemOp_mov_Rd_Dd)
{
    IEMOP_MNEMONIC("mov Rd,Dd");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* NOTE(review): uses IEMOP_HLP_NO_LOCK_PREFIX here while the 0x0f 0x23
       counterpart uses IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX - confirm
       whether the difference is intentional. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
        return IEMOP_RAISE_INVALID_OPCODE(); /* No DR8..DR15; REX.R is invalid. */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Dd,
                                   (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB,
                                   ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK));
}
1584
1585
/** Opcode 0x0f 0x22 - mov Cd,Rd. */
FNIEMOP_DEF(iemOp_mov_Cd_Rd)
{
    /* mod is ignored, as is operand size overrides. */
    IEMOP_MNEMONIC("mov Cd,Rd");
    /* Operand size is forced: 64-bit in long mode, 32-bit otherwise. */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
    else
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
    {
        /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
        if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
            return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */
        iCrReg |= 8;
    }
    /* Only CR0, CR2, CR3, CR4 and CR8 exist; anything else is #UD. */
    switch (iCrReg)
    {
        case 0: case 2: case 3: case 4: case 8:
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
    IEMOP_HLP_DONE_DECODING();

    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Cd_Rd, iCrReg, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}
1616
1617
/** Opcode 0x0f 0x23 - mov Dd,Rd (write debug register). */
FNIEMOP_DEF(iemOp_mov_Dd_Rd)
{
    IEMOP_MNEMONIC("mov Dd,Rd");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
        return IEMOP_RAISE_INVALID_OPCODE(); /* No DR8..DR15; REX.R is invalid. */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Dd_Rd,
                                   ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK),
                                   (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}
1630
1631
/** Opcode 0x0f 0x24 - mov Rd,Td.
 * Test registers; invalid on the CPUs we emulate. */
FNIEMOP_DEF(iemOp_mov_Rd_Td)
{
    IEMOP_MNEMONIC("mov Rd,Td");
    /* The RM byte is not considered, see testcase. */
    return IEMOP_RAISE_INVALID_OPCODE();
}
1639
1640
/** Opcode 0x0f 0x26 - mov Td,Rd.
 * Test registers; invalid on the CPUs we emulate. */
FNIEMOP_DEF(iemOp_mov_Td_Rd)
{
    IEMOP_MNEMONIC("mov Td,Rd");
    /* The RM byte is not considered, see testcase. */
    return IEMOP_RAISE_INVALID_OPCODE();
}
1648
1649
1650/** Opcode 0x0f 0x28. */
1651FNIEMOP_STUB(iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd);
1652/** Opcode 0x0f 0x29. */
1653FNIEMOP_STUB(iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd);
1654/** Opcode 0x0f 0x2a. */
1655FNIEMOP_STUB(iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey); //NEXT
1656/** Opcode 0x0f 0x2b. */
1657FNIEMOP_STUB(iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd);
1658/** Opcode 0x0f 0x2c. */
1659FNIEMOP_STUB(iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd); //NEXT
1660/** Opcode 0x0f 0x2d. */
1661FNIEMOP_STUB(iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd);
1662/** Opcode 0x0f 0x2e. */
1663FNIEMOP_STUB(iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd); //NEXT
1664/** Opcode 0x0f 0x2f. */
1665FNIEMOP_STUB(iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd);
1666
1667
/** Opcode 0x0f 0x30. */
FNIEMOP_DEF(iemOp_wrmsr)
{
    IEMOP_MNEMONIC("wrmsr");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_wrmsr);
}
1675
1676
/** Opcode 0x0f 0x31. */
FNIEMOP_DEF(iemOp_rdtsc)
{
    IEMOP_MNEMONIC("rdtsc");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdtsc);
}
1684
1685
/** Opcode 0x0f 0x32. */
FNIEMOP_DEF(iemOp_rdmsr)
{
    IEMOP_MNEMONIC("rdmsr");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdmsr);
}
1693
1694
/** Opcode 0x0f 0x33. */
1696FNIEMOP_STUB(iemOp_rdpmc);
1697/** Opcode 0x0f 0x34. */
1698FNIEMOP_STUB(iemOp_sysenter);
1699/** Opcode 0x0f 0x35. */
1700FNIEMOP_STUB(iemOp_sysexit);
1701/** Opcode 0x0f 0x37. */
1702FNIEMOP_STUB(iemOp_getsec);
1703/** Opcode 0x0f 0x38. */
1704FNIEMOP_UD_STUB(iemOp_3byte_Esc_A4); /* Here there be dragons... */
1705/** Opcode 0x0f 0x3a. */
1706FNIEMOP_UD_STUB(iemOp_3byte_Esc_A5); /* Here there be dragons... */
1707/** Opcode 0x0f 0x3c (?). */
1708FNIEMOP_STUB(iemOp_movnti_Gv_Ev);
1709
/**
 * Implements a conditional move (CMOVcc Gv,Ev).
 *
 * Wish there was an obvious way to do this where we could share and reduce
 * code bloat.
 *
 * @param a_Cnd The conditional "microcode" operation (an IEM_MC_IF_EFL_XXX
 *              test); the destination GPR is only written inside the taken
 *              branch.  Note: for 32-bit operands the high dword of the
 *              destination is cleared even when the condition is false
 *              (the IEM_MC_ELSE branch below), matching 64-bit-mode
 *              zero-extension semantics.
 */
#define CMOV_X(a_Cnd) \
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); \
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) \
    { \
        switch (pIemCpu->enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint16_t, u16Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint32_t, u32Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
                } IEM_MC_ELSE() { \
                    IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint64_t, u64Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } \
    else \
    { \
        switch (pIemCpu->enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint16_t, u16Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \
                IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint32_t, u32Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \
                IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
                } IEM_MC_ELSE() { \
                    IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint64_t, u64Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } do {} while (0)
1810
1811
1812
/** Opcode 0x0f 0x40. */
FNIEMOP_DEF(iemOp_cmovo_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovo Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF)); /* move if OF=1 */
}
1819
1820
/** Opcode 0x0f 0x41. */
FNIEMOP_DEF(iemOp_cmovno_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovno Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_OF)); /* move if OF=0 */
}
1827
1828
/** Opcode 0x0f 0x42. */
FNIEMOP_DEF(iemOp_cmovc_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovc Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF)); /* move if CF=1 */
}
1835
1836
/** Opcode 0x0f 0x43. */
FNIEMOP_DEF(iemOp_cmovnc_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnc Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_CF)); /* move if CF=0 */
}
1843
1844
/** Opcode 0x0f 0x44. */
FNIEMOP_DEF(iemOp_cmove_Gv_Ev)
{
    IEMOP_MNEMONIC("cmove Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)); /* move if ZF=1 */
}
1851
1852
/** Opcode 0x0f 0x45. */
FNIEMOP_DEF(iemOp_cmovne_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovne Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF)); /* move if ZF=0 */
}
1859
1860
/** Opcode 0x0f 0x46. */
FNIEMOP_DEF(iemOp_cmovbe_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovbe Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF)); /* move if CF=1 or ZF=1 */
}
1867
1868
/** Opcode 0x0f 0x47. */
FNIEMOP_DEF(iemOp_cmovnbe_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnbe Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_NO_BITS_SET(X86_EFL_CF | X86_EFL_ZF)); /* move if CF=0 and ZF=0 */
}
1875
1876
/** Opcode 0x0f 0x48. */
FNIEMOP_DEF(iemOp_cmovs_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovs Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF)); /* move if SF=1 */
}
1883
1884
/** Opcode 0x0f 0x49. */
FNIEMOP_DEF(iemOp_cmovns_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovns Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_SF)); /* move if SF=0 */
}
1891
1892
/** Opcode 0x0f 0x4a. */
FNIEMOP_DEF(iemOp_cmovp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovp Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF)); /* move if PF=1 */
}
1899
1900
/** Opcode 0x0f 0x4b. */
FNIEMOP_DEF(iemOp_cmovnp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnp Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_PF)); /* move if PF=0 */
}
1907
1908
/** Opcode 0x0f 0x4c. */
FNIEMOP_DEF(iemOp_cmovl_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovl Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF)); /* move if SF != OF */
}
1915
1916
/** Opcode 0x0f 0x4d. */
FNIEMOP_DEF(iemOp_cmovnl_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnl Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BITS_EQ(X86_EFL_SF, X86_EFL_OF)); /* move if SF == OF */
}
1923
1924
/** Opcode 0x0f 0x4e. */
FNIEMOP_DEF(iemOp_cmovle_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovle Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF)); /* move if ZF=1 or SF != OF */
}
1931
1932
/** Opcode 0x0f 0x4f. */
FNIEMOP_DEF(iemOp_cmovnle_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnle Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF)); /* move if ZF=0 and SF == OF */
}
1939
1940#undef CMOV_X
1941
1942/** Opcode 0x0f 0x50. */
1943FNIEMOP_STUB(iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd);
1944/** Opcode 0x0f 0x51. */
1945FNIEMOP_STUB(iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd);
1946/** Opcode 0x0f 0x52. */
1947FNIEMOP_STUB(iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss);
1948/** Opcode 0x0f 0x53. */
1949FNIEMOP_STUB(iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss);
1950/** Opcode 0x0f 0x54. */
1951FNIEMOP_STUB(iemOp_andps_Vps_Wps__andpd_Wpd_Vpd);
1952/** Opcode 0x0f 0x55. */
1953FNIEMOP_STUB(iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd);
1954/** Opcode 0x0f 0x56. */
1955FNIEMOP_STUB(iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd);
1956/** Opcode 0x0f 0x57. */
1957FNIEMOP_STUB(iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd);
1958/** Opcode 0x0f 0x58. */
1959FNIEMOP_STUB(iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd); //NEXT
1960/** Opcode 0x0f 0x59. */
1961FNIEMOP_STUB(iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd);//NEXT
1962/** Opcode 0x0f 0x5a. */
1963FNIEMOP_STUB(iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd);
1964/** Opcode 0x0f 0x5b. */
1965FNIEMOP_STUB(iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps);
1966/** Opcode 0x0f 0x5c. */
1967FNIEMOP_STUB(iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd);
1968/** Opcode 0x0f 0x5d. */
1969FNIEMOP_STUB(iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd);
1970/** Opcode 0x0f 0x5e. */
1971FNIEMOP_STUB(iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd);
1972/** Opcode 0x0f 0x5f. */
1973FNIEMOP_STUB(iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd);
1974
1975
/**
 * Common worker for SSE2 and MMX instructions on the forms:
 *      pxxxx xmm1, xmm2/mem128
 *      pxxxx mm1, mm2/mem32
 *
 * The 2nd operand is the first half of a register, which in the memory case
 * means a 32-bit memory access for MMX and a 128-bit aligned 64-bit memory
 * access for SSE (see the FETCH microcode in the respective branches).
 *
 * Exceptions type 4.
 */
FNIEMOP_DEF_1(iemOpCommonMmxSse_LowLow_To_Full, PCIEMOPMEDIAF1L1, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* Dispatch on the prefix: 0x66 selects the SSE form, none selects MMX,
       repz/repnz are invalid. */
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint128_t *, pDst, 0);
                IEM_MC_ARG(uint64_t const *, pSrc, 1);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_XREG_U64_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint128_t *, pDst, 0);
                IEM_MC_LOCAL(uint64_t, uSrc);
                IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                /* 64-bit load with full 128-bit alignment check (exception type 4). */
                IEM_MC_FETCH_MEM_U64_ALIGN_U128(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);

                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            if (!pImpl->pfnU64)
                return IEMOP_RAISE_INVALID_OPCODE(); /* No MMX variant for this op. */
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint64_t *, pDst, 0);
                IEM_MC_ARG(uint32_t const *, pSrc, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_REF_MREG_U32_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint64_t *, pDst, 0);
                IEM_MC_LOCAL(uint32_t, uSrc);
                IEM_MC_ARG_LOCAL_REF(uint32_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U32(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);

                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
2082
2083
/** Opcode 0x0f 0x60 - punpcklbw Pq,Qd / punpcklbw Vdq,Wdq. */
FNIEMOP_DEF(iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpcklbw");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklbw);
}
2090
2091
/** Opcode 0x0f 0x61 - punpcklwd Pq,Qd / punpcklwd Vdq,Wdq. */
FNIEMOP_DEF(iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpcklwd"); /** @todo AMD mark the MMX version as 3DNow!. Intel says MMX CPUID req. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklwd);
}
2098
2099
/** Opcode 0x0f 0x62 - punpckldq Pq,Qd / punpckldq Vdq,Wdq. */
FNIEMOP_DEF(iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckldq");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpckldq);
}
2106
2107
/** Opcode 0x0f 0x63 - packsswb (stub, not yet implemented). */
FNIEMOP_STUB(iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq);
/** Opcode 0x0f 0x64 - pcmpgtb (stub, not yet implemented). */
FNIEMOP_STUB(iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq);
/** Opcode 0x0f 0x65 - pcmpgtw (stub, not yet implemented). */
FNIEMOP_STUB(iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq);
/** Opcode 0x0f 0x66 - pcmpgtd (stub, not yet implemented). */
FNIEMOP_STUB(iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq);
/** Opcode 0x0f 0x67 - packuswb (stub, not yet implemented). */
FNIEMOP_STUB(iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq);
2118
2119
2120/**
2121 * Common worker for SSE2 and MMX instructions on the forms:
2122 * pxxxx xmm1, xmm2/mem128
2123 * pxxxx mm1, mm2/mem64
2124 *
2125 * The 2nd operand is the second half of a register, which in the memory case
2126 * means a 64-bit memory access for MMX, and for MMX a 128-bit aligned access
2127 * where it may read the full 128 bits or only the upper 64 bits.
2128 *
2129 * Exceptions type 4.
2130 */
2131FNIEMOP_DEF_1(iemOpCommonMmxSse_HighHigh_To_Full, PCIEMOPMEDIAF1H1, pImpl)
2132{
2133 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2134 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
2135 {
2136 case IEM_OP_PRF_SIZE_OP: /* SSE */
2137 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2138 {
2139 /*
2140 * Register, register.
2141 */
2142 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
2143 IEM_MC_BEGIN(2, 0);
2144 IEM_MC_ARG(uint128_t *, pDst, 0);
2145 IEM_MC_ARG(uint128_t const *, pSrc, 1);
2146 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
2147 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2148 IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2149 IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
2150 IEM_MC_ADVANCE_RIP();
2151 IEM_MC_END();
2152 }
2153 else
2154 {
2155 /*
2156 * Register, memory.
2157 */
2158 IEM_MC_BEGIN(2, 2);
2159 IEM_MC_ARG(uint128_t *, pDst, 0);
2160 IEM_MC_LOCAL(uint128_t, uSrc);
2161 IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1);
2162 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
2163
2164 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
2165 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
2166 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
2167 IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc); /* Most CPUs probably only right high qword */
2168
2169 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2170 IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
2171
2172 IEM_MC_ADVANCE_RIP();
2173 IEM_MC_END();
2174 }
2175 return VINF_SUCCESS;
2176
2177 case 0: /* MMX */
2178 if (!pImpl->pfnU64)
2179 return IEMOP_RAISE_INVALID_OPCODE();
2180 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2181 {
2182 /*
2183 * Register, register.
2184 */
2185 /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
2186 /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
2187 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
2188 IEM_MC_BEGIN(2, 0);
2189 IEM_MC_ARG(uint64_t *, pDst, 0);
2190 IEM_MC_ARG(uint64_t const *, pSrc, 1);
2191 IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
2192 IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
2193 IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
2194 IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
2195 IEM_MC_ADVANCE_RIP();
2196 IEM_MC_END();
2197 }
2198 else
2199 {
2200 /*
2201 * Register, memory.
2202 */
2203 IEM_MC_BEGIN(2, 2);
2204 IEM_MC_ARG(uint64_t *, pDst, 0);
2205 IEM_MC_LOCAL(uint64_t, uSrc);
2206 IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1);
2207 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
2208
2209 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
2210 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
2211 IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
2212 IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
2213
2214 IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
2215 IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
2216
2217 IEM_MC_ADVANCE_RIP();
2218 IEM_MC_END();
2219 }
2220 return VINF_SUCCESS;
2221
2222 default:
2223 return IEMOP_RAISE_INVALID_OPCODE();
2224 }
2225}
2226
2227
/** Opcode 0x0f 0x68 - punpckhbw Pq,Qq / punpckhbw Vdq,Wdq. */
FNIEMOP_DEF(iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckhbw");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhbw);
}
2234
2235
/** Opcode 0x0f 0x69 - punpckhwd Pq,Qd / punpckhwd Vdq,Wdq. */
FNIEMOP_DEF(iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckhwd");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhwd);
}
2242
2243
/** Opcode 0x0f 0x6a - punpckhdq Pq,Qd / punpckhdq Vdq,Wdq. */
FNIEMOP_DEF(iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckhdq");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhdq);
}
2250
/** Opcode 0x0f 0x6b - packssdw (stub, not yet implemented).
 * NOTE(review): identifier says 'packssdq' for the SSE form; the mnemonic is
 * packssdw for both - consider fixing when implementing (table update needed). */
FNIEMOP_STUB(iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq);
2253
2254
/** Opcode 0x0f 0x6c - punpcklqdq Vdq,Wdq (SSE2 only, no MMX form). */
FNIEMOP_DEF(iemOp_punpcklqdq_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpcklqdq");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklqdq);
}
2261
2262
/** Opcode 0x0f 0x6d - punpckhqdq Vdq,Wdq (SSE2 only, no MMX form). */
FNIEMOP_DEF(iemOp_punpckhqdq_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckhqdq");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhqdq);
}
2269
2270
/** Opcode 0x0f 0x6e - movd/movq Pd/q,Ed/q (MMX) and movd/movq Vd/q,Ed/q (SSE2).
 * REX.W selects the 64-bit (movq) form; without it the 32-bit (movd) form. */
FNIEMOP_DEF(iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            IEMOP_MNEMONIC("movd/q Wd/q,Ed/q");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* XMM, greg*/
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    /* movq: 64-bit GPR source, zero extended to the full xmm register. */
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                    IEM_MC_STORE_XREG_U64_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
                }
                else
                {
                    /* movd: 32-bit GPR source, zero extended to the full xmm register. */
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                    IEM_MC_STORE_XREG_U32_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* XMM, [mem] */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                    IEM_MC_STORE_XREG_U64_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                    IEM_MC_STORE_XREG_U32_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            IEMOP_MNEMONIC("movq/d Pd/q,Ed/q");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* MMX, greg */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                    IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                else
                    /* movd: 32-bit GPR source, zero extended into the 64-bit MMX register. */
                    IEM_MC_FETCH_GREG_U32_ZX_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* MMX, [mem] */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                    IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                    IEM_MC_STORE_MREG_U32_ZX_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
2371
2372
/** Opcode 0x0f 0x6f - movq Pq,Qq (MMX), movdqa Vdq,Wdq (66h) and
 *  movdqu Vdq,Wdq (F3h); loads a full MMX/XMM register. */
FNIEMOP_DEF(iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq)
{
    bool fAligned = false;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE aligned */
            fAligned = true;
            /* fall thru - aligned and unaligned forms share the code below. */
        case IEM_OP_PRF_REPZ: /* SSE unaligned */
            if (fAligned)
                IEMOP_MNEMONIC("movdqa Vdq,Wdq");
            else
                IEMOP_MNEMONIC("movdqu Vdq,Wdq");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint128_t, u128Tmp);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_FETCH_XREG_U128(u128Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u128Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint128_t, u128Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                /* Only the movdqa form enforces 16-byte alignment (#GP on misaligned). */
                if (fAligned)
                    IEM_MC_FETCH_MEM_U128_ALIGN_SSE(u128Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                else
                    IEM_MC_FETCH_MEM_U128(u128Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_STORE_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u128Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            IEMOP_MNEMONIC("movq Pq,Qq");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MREG_U64(u64Tmp, bRm & X86_MODRM_RM_MASK);
                IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
2466
2467
/** Opcode 0x0f 0x70. The immediate here is evil!
 * pshufw Pq,Qq,Ib (no prefix), pshufd (66h), pshuflw (F2h), pshufhw (F3h).
 * 'Evil' because the Ib immediate follows the ModR/M bytes and must be
 * fetched after the effective address has been calculated (memory forms). */
FNIEMOP_DEF(iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
        case IEM_OP_PRF_REPNZ: /* SSE */
        case IEM_OP_PRF_REPZ: /* SSE */
        {
            /* All three SSE forms share the decode; only the worker differs. */
            PFNIEMAIMPLMEDIAPSHUF pfnAImpl;
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
            {
                case IEM_OP_PRF_SIZE_OP:
                    IEMOP_MNEMONIC("pshufd Vdq,Wdq,Ib");
                    pfnAImpl = iemAImpl_pshufd;
                    break;
                case IEM_OP_PRF_REPNZ:
                    IEMOP_MNEMONIC("pshuflw Vdq,Wdq,Ib");
                    pfnAImpl = iemAImpl_pshuflw;
                    break;
                case IEM_OP_PRF_REPZ:
                    IEMOP_MNEMONIC("pshufhw Vdq,Wdq,Ib");
                    pfnAImpl = iemAImpl_pshufhw;
                    break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint128_t *, pDst, 0);
                IEM_MC_ARG(uint128_t const *, pSrc, 1);
                IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_SSE_AIMPL_3(pfnAImpl, pDst, pSrc, bEvilArg);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint128_t *, pDst, 0);
                IEM_MC_LOCAL(uint128_t, uSrc);
                IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                /* The immediate is only fetched now, after the ModR/M bytes. */
                uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil);
                IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();

                IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_SSE_AIMPL_3(pfnAImpl, pDst, pSrc, bEvilArg);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
        }

        case 0: /* MMX Extension */
            IEMOP_MNEMONIC("pshufw Pq,Qq,Ib");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pDst, 0);
                IEM_MC_ARG(uint64_t const *, pSrc, 1);
                IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2);
                /* pshufw requires SSE or the AMD MMX extensions, not plain MMX. */
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT();
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
                IEM_MC_CALL_MMX_AIMPL_3(iemAImpl_pshufw, pDst, pSrc, bEvilArg);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pDst, 0);
                IEM_MC_LOCAL(uint64_t, uSrc);
                IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                /* The immediate is only fetched now, after the ModR/M bytes. */
                uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil);
                IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT();

                IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_CALL_MMX_AIMPL_3(iemAImpl_pshufw, pDst, pSrc, bEvilArg);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
2592
2593
/** Opcode 0x0f 0x71 11/2 - psrlw Nq,Ib (MMX, stub). */
FNIEMOP_STUB_1(iemOp_Grp12_psrlw_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x71 11/2 - psrlw Udq,Ib (SSE2, stub). */
FNIEMOP_STUB_1(iemOp_Grp12_psrlw_Udq_Ib, uint8_t, bRm);

/** Opcode 0x0f 0x71 11/4 - psraw Nq,Ib (MMX, stub). */
FNIEMOP_STUB_1(iemOp_Grp12_psraw_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x71 11/4 - psraw Udq,Ib (SSE2, stub). */
FNIEMOP_STUB_1(iemOp_Grp12_psraw_Udq_Ib, uint8_t, bRm);

/** Opcode 0x0f 0x71 11/6 - psllw Nq,Ib (MMX, stub). */
FNIEMOP_STUB_1(iemOp_Grp12_psllw_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x71 11/6 - psllw Udq,Ib (SSE2, stub). */
FNIEMOP_STUB_1(iemOp_Grp12_psllw_Udq_Ib, uint8_t, bRm);
2611
2612
/** Opcode 0x0f 0x71 - Group 12: word shifts by immediate (psrlw/psraw/psllw).
 * Register forms only; the ModR/M reg field selects the instruction and the
 * operand size prefix selects between the MMX and SSE2 variants. */
FNIEMOP_DEF(iemOp_Grp12)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* Memory forms are not defined for this group. */
    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_OPCODE();
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: case 1: case 3: case 5: case 7:
            return IEMOP_RAISE_INVALID_OPCODE();
        case 2: /* psrlw */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp12_psrlw_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp12_psrlw_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 4: /* psraw */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp12_psraw_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp12_psraw_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 6: /* psllw */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp12_psllw_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp12_psllw_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
2647
2648
/** Opcode 0x0f 0x72 11/2 - psrld Nq,Ib (MMX, stub). */
FNIEMOP_STUB_1(iemOp_Grp13_psrld_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x72 11/2 - psrld Udq,Ib (SSE2, stub). */
FNIEMOP_STUB_1(iemOp_Grp13_psrld_Udq_Ib, uint8_t, bRm);

/** Opcode 0x0f 0x72 11/4 - psrad Nq,Ib (MMX, stub). */
FNIEMOP_STUB_1(iemOp_Grp13_psrad_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x72 11/4 - psrad Udq,Ib (SSE2, stub). */
FNIEMOP_STUB_1(iemOp_Grp13_psrad_Udq_Ib, uint8_t, bRm);

/** Opcode 0x0f 0x72 11/6 - pslld Nq,Ib (MMX, stub). */
FNIEMOP_STUB_1(iemOp_Grp13_pslld_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x72 11/6 - pslld Udq,Ib (SSE2, stub). */
FNIEMOP_STUB_1(iemOp_Grp13_pslld_Udq_Ib, uint8_t, bRm);
2666
2667
/** Opcode 0x0f 0x72 - Group 13: dword shifts by immediate (psrld/psrad/pslld).
 * Register forms only; the ModR/M reg field selects the instruction and the
 * operand size prefix selects between the MMX and SSE2 variants. */
FNIEMOP_DEF(iemOp_Grp13)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* Memory forms are not defined for this group. */
    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_OPCODE();
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: case 1: case 3: case 5: case 7:
            return IEMOP_RAISE_INVALID_OPCODE();
        case 2: /* psrld */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp13_psrld_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp13_psrld_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 4: /* psrad */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp13_psrad_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp13_psrad_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 6: /* pslld */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp13_pslld_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp13_pslld_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
2702
2703
/** Opcode 0x0f 0x73 11/2 - psrlq Nq,Ib (MMX, stub). */
FNIEMOP_STUB_1(iemOp_Grp14_psrlq_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x73 11/2 - psrlq Udq,Ib (SSE2, stub). */
FNIEMOP_STUB_1(iemOp_Grp14_psrlq_Udq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x73 11/3 - psrldq Udq,Ib (SSE2 only, stub). */
FNIEMOP_STUB_1(iemOp_Grp14_psrldq_Udq_Ib, uint8_t, bRm); //NEXT

/** Opcode 0x0f 0x73 11/6 - psllq Nq,Ib (MMX, stub). */
FNIEMOP_STUB_1(iemOp_Grp14_psllq_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x73 11/6 - psllq Udq,Ib (SSE2, stub). */
FNIEMOP_STUB_1(iemOp_Grp14_psllq_Udq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x73 11/7 - pslldq Udq,Ib (SSE2 only, stub). */
FNIEMOP_STUB_1(iemOp_Grp14_pslldq_Udq_Ib, uint8_t, bRm); //NEXT
2721
2722
/** Opcode 0x0f 0x73 - Group 14: qword/dqword shifts by immediate
 * (psrlq/psrldq/psllq/pslldq). Register forms only; /3 and /7 exist only with
 * the operand size prefix (SSE2), hence the asymmetric sub-switches below. */
FNIEMOP_DEF(iemOp_Grp14)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* Memory forms are not defined for this group. */
    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_OPCODE();
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: case 1: case 4: case 5:
            return IEMOP_RAISE_INVALID_OPCODE();
        case 2: /* psrlq */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp14_psrlq_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp14_psrlq_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 3: /* psrldq - SSE2 only, no MMX form */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp14_psrldq_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 6: /* psllq */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp14_psllq_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp14_psllq_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 7: /* pslldq - SSE2 only, no MMX form */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp14_pslldq_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
2762
2763
2764/**
2765 * Common worker for SSE2 and MMX instructions on the forms:
2766 * pxxx mm1, mm2/mem64
2767 * pxxx xmm1, xmm2/mem128
2768 *
2769 * Proper alignment of the 128-bit operand is enforced.
2770 * Exceptions type 4. SSE2 and MMX cpuid checks.
2771 */
2772FNIEMOP_DEF_1(iemOpCommonMmxSse2_FullFull_To_Full, PCIEMOPMEDIAF2, pImpl)
2773{
2774 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2775 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
2776 {
2777 case IEM_OP_PRF_SIZE_OP: /* SSE */
2778 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2779 {
2780 /*
2781 * Register, register.
2782 */
2783 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
2784 IEM_MC_BEGIN(2, 0);
2785 IEM_MC_ARG(uint128_t *, pDst, 0);
2786 IEM_MC_ARG(uint128_t const *, pSrc, 1);
2787 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
2788 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2789 IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2790 IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
2791 IEM_MC_ADVANCE_RIP();
2792 IEM_MC_END();
2793 }
2794 else
2795 {
2796 /*
2797 * Register, memory.
2798 */
2799 IEM_MC_BEGIN(2, 2);
2800 IEM_MC_ARG(uint128_t *, pDst, 0);
2801 IEM_MC_LOCAL(uint128_t, uSrc);
2802 IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1);
2803 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
2804
2805 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
2806 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
2807 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
2808 IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
2809
2810 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2811 IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
2812
2813 IEM_MC_ADVANCE_RIP();
2814 IEM_MC_END();
2815 }
2816 return VINF_SUCCESS;
2817
2818 case 0: /* MMX */
2819 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2820 {
2821 /*
2822 * Register, register.
2823 */
2824 /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
2825 /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
2826 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
2827 IEM_MC_BEGIN(2, 0);
2828 IEM_MC_ARG(uint64_t *, pDst, 0);
2829 IEM_MC_ARG(uint64_t const *, pSrc, 1);
2830 IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
2831 IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
2832 IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
2833 IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
2834 IEM_MC_ADVANCE_RIP();
2835 IEM_MC_END();
2836 }
2837 else
2838 {
2839 /*
2840 * Register, memory.
2841 */
2842 IEM_MC_BEGIN(2, 2);
2843 IEM_MC_ARG(uint64_t *, pDst, 0);
2844 IEM_MC_LOCAL(uint64_t, uSrc);
2845 IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1);
2846 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
2847
2848 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
2849 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
2850 IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
2851 IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
2852
2853 IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
2854 IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
2855
2856 IEM_MC_ADVANCE_RIP();
2857 IEM_MC_END();
2858 }
2859 return VINF_SUCCESS;
2860
2861 default:
2862 return IEMOP_RAISE_INVALID_OPCODE();
2863 }
2864}
2865
2866
/** Opcode 0x0f 0x74 - pcmpeqb Pq,Qq / pcmpeqb Vdq,Wdq. */
FNIEMOP_DEF(iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq)
{
    IEMOP_MNEMONIC("pcmpeqb");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pcmpeqb);
}
2873
2874
/** Opcode 0x0f 0x75 - pcmpeqw Pq,Qq / pcmpeqw Vdq,Wdq. */
FNIEMOP_DEF(iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq)
{
    IEMOP_MNEMONIC("pcmpeqw");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pcmpeqw);
}
2881
2882
/** Opcode 0x0f 0x76 - pcmpeqd Pq,Qq / pcmpeqd Vdq,Wdq.
 * NOTE(review): function name says 'pcmped' (missing 'q'); renaming requires
 * updating the two-byte opcode table as well. */
FNIEMOP_DEF(iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq)
{
    IEMOP_MNEMONIC("pcmpeqd");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pcmpeqd);
}
2889
2890
/** Opcode 0x0f 0x77 - emms (stub, not yet implemented). */
FNIEMOP_STUB(iemOp_emms);
/** Opcode 0x0f 0x78 - vmread / AMD group 17; decodes as \#UD here. */
FNIEMOP_UD_STUB(iemOp_vmread_AmdGrp17);
/** Opcode 0x0f 0x79 - vmwrite; decodes as \#UD here. */
FNIEMOP_UD_STUB(iemOp_vmwrite);
/** Opcode 0x0f 0x7c - haddpd/haddps (stub, not yet implemented). */
FNIEMOP_STUB(iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps);
/** Opcode 0x0f 0x7d - hsubpd/hsubps (stub, not yet implemented). */
FNIEMOP_STUB(iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps);
2901
2902
/** Opcode 0x0f 0x7e - movd/movq Ed/q,Pd/q (MMX) and movd/movq Ed/q,Vd/q (SSE2);
 * the store direction counterpart of 0x6e. REX.W selects the 64-bit form. */
FNIEMOP_DEF(iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            IEMOP_MNEMONIC("movd/q Ed/q,Wd/q");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* greg, XMM */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    /* movq: store the low qword of the xmm register into the GPR. */
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_XREG_U64(u64Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                    IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
                }
                else
                {
                    /* movd: store the low dword of the xmm register into the GPR. */
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_XREG_U32(u32Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                    IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* [mem], XMM */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_XREG_U64(u64Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                    IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffSrc, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_XREG_U32(u32Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                    IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffSrc, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            IEMOP_MNEMONIC("movq/d Ed/q,Pd/q");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* greg, MMX */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                    IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_MREG_U32(u32Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                    IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* [mem], MMX */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                    IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffSrc, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_MREG_U32(u32Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                    IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffSrc, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
3009
3010
/** Opcode 0x0f 0x7f - movq Qq,Pq (MMX), movdqa Wdq,Vdq (0x66 prefix) and
 *  movdqu Wdq,Vdq (0xf3 prefix).  This is the store direction: MMX/XMM
 *  register to register or memory.  Dispatch is on the mandatory prefix. */
FNIEMOP_DEF(iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    bool fAligned = false;
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE aligned */
            fAligned = true;
            /* fall thru - aligned and unaligned forms share the decode below. */
        case IEM_OP_PRF_REPZ: /* SSE unaligned */
            if (fAligned)
                IEMOP_MNEMONIC("movdqa Wdq,Vdq");
            else
                IEMOP_MNEMONIC("movdqu Wdq,Vdq");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint128_t, u128Tmp);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_FETCH_XREG_U128(u128Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_XREG_U128((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u128Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint128_t, u128Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_FETCH_XREG_U128(u128Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                /* Only the aligned (movdqa) form raises #GP on a misaligned address. */
                if (fAligned)
                    IEM_MC_STORE_MEM_U128_ALIGN_SSE(pIemCpu->iEffSeg, GCPtrEffSrc, u128Tmp);
                else
                    IEM_MC_STORE_MEM_U128(pIemCpu->iEffSeg, GCPtrEffSrc, u128Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            IEMOP_MNEMONIC("movq Qq,Pq");

            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_STORE_MREG_U64(bRm & X86_MODRM_RM_MASK, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffSrc, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        /* Any other prefix combination (e.g. 0xf2) is an invalid encoding. */
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
3105
3106
3107
/** Opcode 0x0f 0x80 - jo Jv: jump near if the overflow flag (OF) is set. */
FNIEMOP_DEF(iemOp_jo_Jv)
{
    IEMOP_MNEMONIC("jo Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Jcc defaults to 64-bit operand size in long mode. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3141
3142
/** Opcode 0x0f 0x81 - jno Jv: jump near if the overflow flag (OF) is clear. */
FNIEMOP_DEF(iemOp_jno_Jv)
{
    IEMOP_MNEMONIC("jno Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Jcc defaults to 64-bit operand size in long mode. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        /* Inverted test: OF set means fall through, OF clear means jump. */
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3176
3177
/** Opcode 0x0f 0x82 - jc/jb/jnae Jv: jump near if the carry flag (CF) is set. */
FNIEMOP_DEF(iemOp_jc_Jv)
{
    IEMOP_MNEMONIC("jc/jb/jnae Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Jcc defaults to 64-bit operand size in long mode. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3211
3212
/** Opcode 0x0f 0x83 - jnc/jnb/jae Jv: jump near if the carry flag (CF) is clear. */
FNIEMOP_DEF(iemOp_jnc_Jv)
{
    IEMOP_MNEMONIC("jnc/jnb/jae Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Jcc defaults to 64-bit operand size in long mode. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        /* Inverted test: CF set means fall through, CF clear means jump. */
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3246
3247
/** Opcode 0x0f 0x84 - je/jz Jv: jump near if the zero flag (ZF) is set. */
FNIEMOP_DEF(iemOp_je_Jv)
{
    IEMOP_MNEMONIC("je/jz Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Jcc defaults to 64-bit operand size in long mode. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3281
3282
/** Opcode 0x0f 0x85 - jne/jnz Jv: jump near if the zero flag (ZF) is clear. */
FNIEMOP_DEF(iemOp_jne_Jv)
{
    IEMOP_MNEMONIC("jne/jnz Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Jcc defaults to 64-bit operand size in long mode. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        /* Inverted test: ZF set means fall through, ZF clear means jump. */
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3316
3317
/** Opcode 0x0f 0x86 - jbe/jna Jv: jump near if CF or ZF is set (unsigned below-or-equal). */
FNIEMOP_DEF(iemOp_jbe_Jv)
{
    IEMOP_MNEMONIC("jbe/jna Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Jcc defaults to 64-bit operand size in long mode. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3351
3352
/** Opcode 0x0f 0x87 - jnbe/ja Jv: jump near if both CF and ZF are clear (unsigned above). */
FNIEMOP_DEF(iemOp_jnbe_Jv)
{
    IEMOP_MNEMONIC("jnbe/ja Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Jcc defaults to 64-bit operand size in long mode. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        /* Inverted test: CF or ZF set means fall through, both clear means jump. */
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3386
3387
/** Opcode 0x0f 0x88 - js Jv: jump near if the sign flag (SF) is set. */
FNIEMOP_DEF(iemOp_js_Jv)
{
    IEMOP_MNEMONIC("js Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Jcc defaults to 64-bit operand size in long mode. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3421
3422
/** Opcode 0x0f 0x89 - jns Jv: jump near if the sign flag (SF) is clear. */
FNIEMOP_DEF(iemOp_jns_Jv)
{
    IEMOP_MNEMONIC("jns Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Jcc defaults to 64-bit operand size in long mode. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        /* Inverted test: SF set means fall through, SF clear means jump. */
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3456
3457
/** Opcode 0x0f 0x8a - jp Jv: jump near if the parity flag (PF) is set. */
FNIEMOP_DEF(iemOp_jp_Jv)
{
    IEMOP_MNEMONIC("jp Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Jcc defaults to 64-bit operand size in long mode. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3491
3492
3493/** Opcode 0x0f 0x8b. */
3494FNIEMOP_DEF(iemOp_jnp_Jv)
3495{
3496 IEMOP_MNEMONIC("jo Jv");
3497 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
3498 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
3499 {
3500 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
3501 IEMOP_HLP_NO_LOCK_PREFIX();
3502
3503 IEM_MC_BEGIN(0, 0);
3504 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
3505 IEM_MC_ADVANCE_RIP();
3506 } IEM_MC_ELSE() {
3507 IEM_MC_REL_JMP_S16(i16Imm);
3508 } IEM_MC_ENDIF();
3509 IEM_MC_END();
3510 }
3511 else
3512 {
3513 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
3514 IEMOP_HLP_NO_LOCK_PREFIX();
3515
3516 IEM_MC_BEGIN(0, 0);
3517 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
3518 IEM_MC_ADVANCE_RIP();
3519 } IEM_MC_ELSE() {
3520 IEM_MC_REL_JMP_S32(i32Imm);
3521 } IEM_MC_ENDIF();
3522 IEM_MC_END();
3523 }
3524 return VINF_SUCCESS;
3525}
3526
3527
/** Opcode 0x0f 0x8c - jl/jnge Jv: jump near if SF != OF (signed less). */
FNIEMOP_DEF(iemOp_jl_Jv)
{
    IEMOP_MNEMONIC("jl/jnge Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Jcc defaults to 64-bit operand size in long mode. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3561
3562
/** Opcode 0x0f 0x8d - jnl/jge Jv: jump near if SF == OF (signed greater-or-equal). */
FNIEMOP_DEF(iemOp_jnl_Jv)
{
    IEMOP_MNEMONIC("jnl/jge Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Jcc defaults to 64-bit operand size in long mode. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        /* Inverted test: SF != OF means fall through, SF == OF means jump. */
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3596
3597
/** Opcode 0x0f 0x8e - jle/jng Jv: jump near if ZF is set or SF != OF (signed less-or-equal). */
FNIEMOP_DEF(iemOp_jle_Jv)
{
    IEMOP_MNEMONIC("jle/jng Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Jcc defaults to 64-bit operand size in long mode. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3631
3632
/** Opcode 0x0f 0x8f - jnle/jg Jv: jump near if ZF is clear and SF == OF (signed greater). */
FNIEMOP_DEF(iemOp_jnle_Jv)
{
    IEMOP_MNEMONIC("jnle/jg Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Jcc defaults to 64-bit operand size in long mode. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        /* 16-bit operand size: signed 16-bit displacement. */
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        /* Inverted test: "less-or-equal" condition means fall through. */
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        /* 32/64-bit operand size: signed 32-bit displacement. */
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3666
3667
/** Opcode 0x0f 0x90 - seto Eb: set byte to 1 if OF is set, else 0. */
FNIEMOP_DEF(iemOp_seto_Eb)
{
    IEMOP_MNEMONIC("seto Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3706
3707
/** Opcode 0x0f 0x91 - setno Eb: set byte to 1 if OF is clear, else 0. */
FNIEMOP_DEF(iemOp_setno_Eb)
{
    IEMOP_MNEMONIC("setno Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        /* Inverted store: OF set writes 0, OF clear writes 1. */
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3746
3747
/** Opcode 0x0f 0x92 - setc/setb/setnae Eb: set byte to 1 if CF is set, else 0. */
FNIEMOP_DEF(iemOp_setc_Eb)
{
    IEMOP_MNEMONIC("setc Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3786
3787
/** Opcode 0x0f 0x93 - setnc/setnb/setae Eb: set byte to 1 if CF is clear, else 0. */
FNIEMOP_DEF(iemOp_setnc_Eb)
{
    IEMOP_MNEMONIC("setnc Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        /* Inverted store: CF set writes 0, CF clear writes 1. */
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3826
3827
/** Opcode 0x0f 0x94 - sete/setz Eb: set byte to 1 if ZF is set, else 0. */
FNIEMOP_DEF(iemOp_sete_Eb)
{
    IEMOP_MNEMONIC("sete Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3866
3867
/** Opcode 0x0f 0x95 - setne/setnz Eb: set byte to 1 if ZF is clear, else 0. */
FNIEMOP_DEF(iemOp_setne_Eb)
{
    IEMOP_MNEMONIC("setne Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        /* Inverted store: ZF set writes 0, ZF clear writes 1. */
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3906
3907
/** Opcode 0x0f 0x96 - setbe/setna Eb: set byte to 1 if CF or ZF is set, else 0. */
FNIEMOP_DEF(iemOp_setbe_Eb)
{
    IEMOP_MNEMONIC("setbe Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3946
3947
/** Opcode 0x0f 0x97 - setnbe/seta Eb: set byte to 1 if both CF and ZF are clear, else 0. */
FNIEMOP_DEF(iemOp_setnbe_Eb)
{
    IEMOP_MNEMONIC("setnbe Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        /* Inverted store: CF or ZF set writes 0, both clear writes 1. */
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3986
3987
/** Opcode 0x0f 0x98 - sets Eb: set byte to 1 if SF is set, else 0. */
FNIEMOP_DEF(iemOp_sets_Eb)
{
    IEMOP_MNEMONIC("sets Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4026
4027
/** Opcode 0x0f 0x99 - setns Eb: set byte to 1 if SF is clear, else 0. */
FNIEMOP_DEF(iemOp_setns_Eb)
{
    IEMOP_MNEMONIC("setns Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        /* Inverted store: SF set writes 0, SF clear writes 1. */
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4066
4067
4068/** Opcode 0x0f 0x9a. */
4069FNIEMOP_DEF(iemOp_setp_Eb)
4070{
4071 IEMOP_MNEMONIC("setnp Eb");
4072 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
4073 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
4074
4075 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
4076 * any way. AMD says it's "unused", whatever that means. We're
4077 * ignoring for now. */
4078 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
4079 {
4080 /* register target */
4081 IEM_MC_BEGIN(0, 0);
4082 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
4083 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
4084 } IEM_MC_ELSE() {
4085 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
4086 } IEM_MC_ENDIF();
4087 IEM_MC_ADVANCE_RIP();
4088 IEM_MC_END();
4089 }
4090 else
4091 {
4092 /* memory target */
4093 IEM_MC_BEGIN(0, 1);
4094 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4095 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4096 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
4097 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
4098 } IEM_MC_ELSE() {
4099 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
4100 } IEM_MC_ENDIF();
4101 IEM_MC_ADVANCE_RIP();
4102 IEM_MC_END();
4103 }
4104 return VINF_SUCCESS;
4105}
4106
4107
/** Opcode 0x0f 0x9b. */
FNIEMOP_DEF(iemOp_setnp_Eb)
{
    /* SETNP/SETPO Eb: store 1 in the byte destination if PF is clear, else 0. */
    IEMOP_MNEMONIC("setnp Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4146
4147
/** Opcode 0x0f 0x9c. */
FNIEMOP_DEF(iemOp_setl_Eb)
{
    /* SETL/SETNGE Eb: store 1 if SF != OF (signed less), else 0. */
    IEMOP_MNEMONIC("setl Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4186
4187
/** Opcode 0x0f 0x9d. */
FNIEMOP_DEF(iemOp_setnl_Eb)
{
    /* SETNL/SETGE Eb: store 1 if SF == OF (signed greater-or-equal), else 0. */
    IEMOP_MNEMONIC("setnl Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4226
4227
/** Opcode 0x0f 0x9e. */
FNIEMOP_DEF(iemOp_setle_Eb)
{
    /* SETLE/SETNG Eb: store 1 if ZF is set or SF != OF (signed less-or-equal), else 0. */
    IEMOP_MNEMONIC("setle Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4266
4267
/** Opcode 0x0f 0x9f. */
FNIEMOP_DEF(iemOp_setnle_Eb)
{
    /* SETNLE/SETG Eb: store 0 if ZF is set or SF != OF, else 1 (signed greater). */
    IEMOP_MNEMONIC("setnle Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4306
4307
/**
 * Common 'push segment-register' helper.
 *
 * Pushes the 16-bit selector of @a iReg, zero extended to the effective
 * operand size for 32/64-bit pushes.  Pushing CS/SS/DS/ES (iReg below FS)
 * is invalid in 64-bit mode; FS/GS pushes default to 64-bit operand size
 * there.
 *
 * @param   iReg    The segment register index (X86_SREG_XXX).
 */
FNIEMOP_DEF_1(iemOpCommonPushSReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (iReg < X86_SREG_FS)
        IEMOP_HLP_NO_64BIT();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint16_t, u16Value);
            IEM_MC_FETCH_SREG_U16(u16Value, iReg);
            IEM_MC_PUSH_U16(u16Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_SREG_ZX_U32(u32Value, iReg);
            IEM_MC_PUSH_U32(u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_SREG_ZX_U64(u64Value, iReg);
            IEM_MC_PUSH_U64(u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
    }

    return VINF_SUCCESS;
}
4350
4351
/** Opcode 0x0f 0xa0. */
FNIEMOP_DEF(iemOp_push_fs)
{
    /* PUSH FS - defers to the common segment-register push helper. */
    IEMOP_MNEMONIC("push fs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_FS);
}
4359
4360
/** Opcode 0x0f 0xa1. */
FNIEMOP_DEF(iemOp_pop_fs)
{
    /* POP FS - segment loads have side effects, so defer to the C implementation. */
    IEMOP_MNEMONIC("pop fs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_FS, pIemCpu->enmEffOpSize);
}
4368
4369
/** Opcode 0x0f 0xa2. */
FNIEMOP_DEF(iemOp_cpuid)
{
    /* CPUID - deferred entirely to the C implementation. */
    IEMOP_MNEMONIC("cpuid");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cpuid);
}
4377
4378
4379/**
4380 * Common worker for iemOp_bt_Ev_Gv, iemOp_btc_Ev_Gv, iemOp_btr_Ev_Gv and
4381 * iemOp_bts_Ev_Gv.
4382 */
4383FNIEMOP_DEF_1(iemOpCommonBit_Ev_Gv, PCIEMOPBINSIZES, pImpl)
4384{
4385 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
4386 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
4387
4388 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
4389 {
4390 /* register destination. */
4391 IEMOP_HLP_NO_LOCK_PREFIX();
4392 switch (pIemCpu->enmEffOpSize)
4393 {
4394 case IEMMODE_16BIT:
4395 IEM_MC_BEGIN(3, 0);
4396 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4397 IEM_MC_ARG(uint16_t, u16Src, 1);
4398 IEM_MC_ARG(uint32_t *, pEFlags, 2);
4399
4400 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4401 IEM_MC_AND_LOCAL_U16(u16Src, 0xf);
4402 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4403 IEM_MC_REF_EFLAGS(pEFlags);
4404 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
4405
4406 IEM_MC_ADVANCE_RIP();
4407 IEM_MC_END();
4408 return VINF_SUCCESS;
4409
4410 case IEMMODE_32BIT:
4411 IEM_MC_BEGIN(3, 0);
4412 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4413 IEM_MC_ARG(uint32_t, u32Src, 1);
4414 IEM_MC_ARG(uint32_t *, pEFlags, 2);
4415
4416 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4417 IEM_MC_AND_LOCAL_U32(u32Src, 0x1f);
4418 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4419 IEM_MC_REF_EFLAGS(pEFlags);
4420 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
4421
4422 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
4423 IEM_MC_ADVANCE_RIP();
4424 IEM_MC_END();
4425 return VINF_SUCCESS;
4426
4427 case IEMMODE_64BIT:
4428 IEM_MC_BEGIN(3, 0);
4429 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4430 IEM_MC_ARG(uint64_t, u64Src, 1);
4431 IEM_MC_ARG(uint32_t *, pEFlags, 2);
4432
4433 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4434 IEM_MC_AND_LOCAL_U64(u64Src, 0x3f);
4435 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4436 IEM_MC_REF_EFLAGS(pEFlags);
4437 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
4438
4439 IEM_MC_ADVANCE_RIP();
4440 IEM_MC_END();
4441 return VINF_SUCCESS;
4442
4443 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4444 }
4445 }
4446 else
4447 {
4448 /* memory destination. */
4449
4450 uint32_t fAccess;
4451 if (pImpl->pfnLockedU16)
4452 fAccess = IEM_ACCESS_DATA_RW;
4453 else /* BT */
4454 {
4455 IEMOP_HLP_NO_LOCK_PREFIX();
4456 fAccess = IEM_ACCESS_DATA_R;
4457 }
4458
4459 /** @todo test negative bit offsets! */
4460 switch (pIemCpu->enmEffOpSize)
4461 {
4462 case IEMMODE_16BIT:
4463 IEM_MC_BEGIN(3, 2);
4464 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4465 IEM_MC_ARG(uint16_t, u16Src, 1);
4466 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
4467 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4468 IEM_MC_LOCAL(int16_t, i16AddrAdj);
4469
4470 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4471 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4472 IEM_MC_ASSIGN(i16AddrAdj, u16Src);
4473 IEM_MC_AND_ARG_U16(u16Src, 0x0f);
4474 IEM_MC_SAR_LOCAL_S16(i16AddrAdj, 4);
4475 IEM_MC_SAR_LOCAL_S16(i16AddrAdj, 1);
4476 IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(GCPtrEffDst, i16AddrAdj);
4477 IEM_MC_FETCH_EFLAGS(EFlags);
4478
4479 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4480 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4481 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
4482 else
4483 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
4484 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
4485
4486 IEM_MC_COMMIT_EFLAGS(EFlags);
4487 IEM_MC_ADVANCE_RIP();
4488 IEM_MC_END();
4489 return VINF_SUCCESS;
4490
4491 case IEMMODE_32BIT:
4492 IEM_MC_BEGIN(3, 2);
4493 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4494 IEM_MC_ARG(uint32_t, u32Src, 1);
4495 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
4496 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4497 IEM_MC_LOCAL(int32_t, i32AddrAdj);
4498
4499 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4500 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4501 IEM_MC_ASSIGN(i32AddrAdj, u32Src);
4502 IEM_MC_AND_ARG_U32(u32Src, 0x1f);
4503 IEM_MC_SAR_LOCAL_S32(i32AddrAdj, 5);
4504 IEM_MC_SHL_LOCAL_S32(i32AddrAdj, 2);
4505 IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(GCPtrEffDst, i32AddrAdj);
4506 IEM_MC_FETCH_EFLAGS(EFlags);
4507
4508 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4509 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4510 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
4511 else
4512 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
4513 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
4514
4515 IEM_MC_COMMIT_EFLAGS(EFlags);
4516 IEM_MC_ADVANCE_RIP();
4517 IEM_MC_END();
4518 return VINF_SUCCESS;
4519
4520 case IEMMODE_64BIT:
4521 IEM_MC_BEGIN(3, 2);
4522 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4523 IEM_MC_ARG(uint64_t, u64Src, 1);
4524 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
4525 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4526 IEM_MC_LOCAL(int64_t, i64AddrAdj);
4527
4528 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4529 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4530 IEM_MC_ASSIGN(i64AddrAdj, u64Src);
4531 IEM_MC_AND_ARG_U64(u64Src, 0x3f);
4532 IEM_MC_SAR_LOCAL_S64(i64AddrAdj, 6);
4533 IEM_MC_SHL_LOCAL_S64(i64AddrAdj, 3);
4534 IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(GCPtrEffDst, i64AddrAdj);
4535 IEM_MC_FETCH_EFLAGS(EFlags);
4536
4537 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4538 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4539 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
4540 else
4541 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
4542 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
4543
4544 IEM_MC_COMMIT_EFLAGS(EFlags);
4545 IEM_MC_ADVANCE_RIP();
4546 IEM_MC_END();
4547 return VINF_SUCCESS;
4548
4549 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4550 }
4551 }
4552}
4553
4554
4555/** Opcode 0x0f 0xa3. */
4556FNIEMOP_DEF(iemOp_bt_Ev_Gv)
4557{
4558 IEMOP_MNEMONIC("bt Gv,Gv");
4559 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bt);
4560}
4561
4562
/**
 * Common worker for iemOp_shrd_Ev_Gv_Ib and iemOp_shld_Ev_Gv_Ib.
 *
 * Double-precision shift with an immediate byte count: the Gv register
 * supplies the bits shifted in, the imm8 the shift count.  AF and OF are
 * declared undefined for verification purposes.
 *
 * @param   pImpl   The shld/shrd implementation table.
 */
FNIEMOP_DEF_1(iemOpCommonShldShrd_Ib, PCIEMOPSHIFTDBLSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: the imm8 follows the ModR/M byte directly. */
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEMOP_HLP_NO_LOCK_PREFIX();

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);

                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG(uint8_t, cShiftArg, 2);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* cbImm=1: one immediate byte follows the addressing bytes. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG(uint8_t, cShiftArg, 2);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG(uint8_t, cShiftArg, 2);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
4707
4708
/**
 * Common worker for iemOp_shrd_Ev_Gv_CL and iemOp_shld_Ev_Gv_CL.
 *
 * Double-precision shift with the count taken from CL: the Gv register
 * supplies the bits shifted in.  AF and OF are declared undefined for
 * verification purposes.
 *
 * @param   pImpl   The shld/shrd implementation table.
 */
FNIEMOP_DEF_1(iemOpCommonShldShrd_CL, PCIEMOPSHIFTDBLSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG(uint8_t, cShiftArg, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count from CL */
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG(uint8_t, cShiftArg, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);

                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG(uint8_t, cShiftArg, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG(uint8_t, cShiftArg, 2);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG(uint8_t, cShiftArg, 2);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG(uint8_t, cShiftArg, 2);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
4852
4853
4854
/** Opcode 0x0f 0xa4. */
FNIEMOP_DEF(iemOp_shld_Ev_Gv_Ib)
{
    /* SHLD Ev,Gv,Ib - defers to the common shld/shrd immediate-count worker. */
    IEMOP_MNEMONIC("shld Ev,Gv,Ib");
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shld);
}
4861
4862
/** Opcode 0x0f 0xa7. */
FNIEMOP_DEF(iemOp_shld_Ev_Gv_CL)
{
    /* SHLD Ev,Gv,CL - defers to the common shld/shrd CL-count worker. */
    IEMOP_MNEMONIC("shld Ev,Gv,CL");
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shld);
}
4869
4870
/** Opcode 0x0f 0xa8. */
FNIEMOP_DEF(iemOp_push_gs)
{
    /* PUSH GS - defers to the common segment-register push helper. */
    IEMOP_MNEMONIC("push gs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_GS);
}
4878
4879
/** Opcode 0x0f 0xa9. */
FNIEMOP_DEF(iemOp_pop_gs)
{
    /* POP GS - segment loads have side effects, so defer to the C implementation. */
    IEMOP_MNEMONIC("pop gs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_GS, pIemCpu->enmEffOpSize);
}
4887
4888
/** Opcode 0x0f 0xaa. */
FNIEMOP_STUB(iemOp_rsm); /**< RSM - not implemented yet (stub). */
4891
4892
/** Opcode 0x0f 0xab. */
FNIEMOP_DEF(iemOp_bts_Ev_Gv)
{
    /* BTS Ev,Gv - defers to the common bit-test worker with the bts table. */
    IEMOP_MNEMONIC("bts Ev,Gv");
    return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bts);
}
4899
4900
/** Opcode 0x0f 0xac. */
FNIEMOP_DEF(iemOp_shrd_Ev_Gv_Ib)
{
    /* SHRD Ev,Gv,Ib - defers to the common shld/shrd immediate-count worker. */
    IEMOP_MNEMONIC("shrd Ev,Gv,Ib");
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shrd);
}
4907
4908
/** Opcode 0x0f 0xad. */
FNIEMOP_DEF(iemOp_shrd_Ev_Gv_CL)
{
    /* SHRD Ev,Gv,CL - defers to the common shld/shrd CL-count worker. */
    IEMOP_MNEMONIC("shrd Ev,Gv,CL");
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shrd);
}
4915
4916
/** Opcode 0x0f 0xae mem/0. */
FNIEMOP_DEF_1(iemOp_Grp15_fxsave, uint8_t, bRm)
{
    /* FXSAVE m512 - raises #UD when the CPUID FXSR feature is absent,
       otherwise defers to the C implementation. */
    IEMOP_MNEMONIC("fxsave m512");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_FXSR))
        return IEMOP_RAISE_INVALID_OPCODE();

    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg,/*=*/pIemCpu->iEffSeg, 0);
    IEM_MC_ARG(RTGCPTR, GCPtrEff, 1);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
    IEM_MC_CALL_CIMPL_3(iemCImpl_fxsave, iEffSeg, GCPtrEff, enmEffOpSize);
    IEM_MC_END();
    return VINF_SUCCESS;
}
4934
4935
/** Opcode 0x0f 0xae mem/1. */
FNIEMOP_DEF_1(iemOp_Grp15_fxrstor, uint8_t, bRm)
{
    /* FXRSTOR m512 - raises #UD when the CPUID FXSR feature is absent,
       otherwise defers to the C implementation. */
    IEMOP_MNEMONIC("fxrstor m512");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_FXSR))
        return IEMOP_RAISE_INVALID_OPCODE();

    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg,/*=*/pIemCpu->iEffSeg, 0);
    IEM_MC_ARG(RTGCPTR, GCPtrEff, 1);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
    IEM_MC_CALL_CIMPL_3(iemCImpl_fxrstor, iEffSeg, GCPtrEff, enmEffOpSize);
    IEM_MC_END();
    return VINF_SUCCESS;
}
4953
4954
/* Remaining Grp15 memory-form encodings: /2 and /3 (ldmxcsr/stmxcsr) and /7
   (clflush) are plain stubs; /4-/6 (xsave family) are #UD stubs. */

/** Opcode 0x0f 0xae mem/2. */
FNIEMOP_STUB_1(iemOp_Grp15_ldmxcsr, uint8_t, bRm);

/** Opcode 0x0f 0xae mem/3. */
FNIEMOP_STUB_1(iemOp_Grp15_stmxcsr, uint8_t, bRm);

/** Opcode 0x0f 0xae mem/4. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_xsave, uint8_t, bRm);

/** Opcode 0x0f 0xae mem/5. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_xrstor, uint8_t, bRm);

/** Opcode 0x0f 0xae mem/6. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_xsaveopt, uint8_t, bRm);

/** Opcode 0x0f 0xae mem/7. */
FNIEMOP_STUB_1(iemOp_Grp15_clflush, uint8_t, bRm);
4972
4973
/** Opcode 0x0f 0xae 11b/5. */
FNIEMOP_DEF_1(iemOp_Grp15_lfence, uint8_t, bRm)
{
    /* LFENCE - #UD without guest SSE2; uses the real lfence helper when the
       host CPU also has SSE2, otherwise a generic alternative fence helper. */
    IEMOP_MNEMONIC("lfence");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
        return IEMOP_RAISE_INVALID_OPCODE();

    IEM_MC_BEGIN(0, 0);
    if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_lfence);
    else
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
4991
4992
/** Opcode 0x0f 0xae 11b/6. */
FNIEMOP_DEF_1(iemOp_Grp15_mfence, uint8_t, bRm)
{
    /* MFENCE - #UD without guest SSE2; uses the real mfence helper when the
       host CPU also has SSE2, otherwise a generic alternative fence helper. */
    IEMOP_MNEMONIC("mfence");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
        return IEMOP_RAISE_INVALID_OPCODE();

    IEM_MC_BEGIN(0, 0);
    if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_mfence);
    else
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
5010
5011
/** Opcode 0x0f 0xae 11b/7. */
FNIEMOP_DEF_1(iemOp_Grp15_sfence, uint8_t, bRm)
{
    /* SFENCE - #UD without guest SSE2; uses the real sfence helper when the
       host CPU also has SSE2, otherwise a generic alternative fence helper. */
    IEMOP_MNEMONIC("sfence");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
        return IEMOP_RAISE_INVALID_OPCODE();

    IEM_MC_BEGIN(0, 0);
    if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_sfence);
    else
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
5029
5030
/* F3-prefixed Grp15 register forms (rd/wr fs/gs base) - all #UD stubs. */

/** Opcode 0xf3 0x0f 0xae 11b/0. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_rdfsbase, uint8_t, bRm);

/** Opcode 0xf3 0x0f 0xae 11b/1. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_rdgsbase, uint8_t, bRm);

/** Opcode 0xf3 0x0f 0xae 11b/2. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_wrfsbase, uint8_t, bRm);

/** Opcode 0xf3 0x0f 0xae 11b/3. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_wrgsbase, uint8_t, bRm);
5043
/** Opcode 0x0f 0xae. */
FNIEMOP_DEF(iemOp_Grp15)
{
    /*
     * Group 15 dispatcher: memory forms select on the ModR/M reg field,
     * register (mod=11b) forms additionally select on the repz/repnz/
     * opsize/lock prefixes.
     *
     * NOTE(review): the memory forms are dispatched without examining the
     * prefix bits at all, unlike the register forms - confirm intended.
     */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_Grp15_fxsave,  bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_Grp15_fxrstor, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_Grp15_ldmxcsr, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_Grp15_stmxcsr, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_Grp15_xsave,   bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_Grp15_xrstor,  bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_Grp15_xsaveopt,bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_Grp15_clflush, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        switch (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_LOCK))
        {
            case 0: /* no relevant prefixes: the fence instructions. */
                switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
                {
                    case 0: return IEMOP_RAISE_INVALID_OPCODE();
                    case 1: return IEMOP_RAISE_INVALID_OPCODE();
                    case 2: return IEMOP_RAISE_INVALID_OPCODE();
                    case 3: return IEMOP_RAISE_INVALID_OPCODE();
                    case 4: return IEMOP_RAISE_INVALID_OPCODE();
                    case 5: return FNIEMOP_CALL_1(iemOp_Grp15_lfence, bRm);
                    case 6: return FNIEMOP_CALL_1(iemOp_Grp15_mfence, bRm);
                    case 7: return FNIEMOP_CALL_1(iemOp_Grp15_sfence, bRm);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;

            case IEM_OP_PRF_REPZ: /* F3 prefix: fs/gs base access. */
                switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
                {
                    case 0: return FNIEMOP_CALL_1(iemOp_Grp15_rdfsbase, bRm);
                    case 1: return FNIEMOP_CALL_1(iemOp_Grp15_rdgsbase, bRm);
                    case 2: return FNIEMOP_CALL_1(iemOp_Grp15_wrfsbase, bRm);
                    case 3: return FNIEMOP_CALL_1(iemOp_Grp15_wrgsbase, bRm);
                    case 4: return IEMOP_RAISE_INVALID_OPCODE();
                    case 5: return IEMOP_RAISE_INVALID_OPCODE();
                    case 6: return IEMOP_RAISE_INVALID_OPCODE();
                    case 7: return IEMOP_RAISE_INVALID_OPCODE();
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;

            default:
                return IEMOP_RAISE_INVALID_OPCODE();
        }
    }
}
5102
5103
/** Opcode 0x0f 0xaf.
 *
 * Two-operand IMUL (Gv := Gv * Ev), implemented via the common
 * reg,reg/mem binary-operator decoder with the two-operand imul worker.
 * SF/ZF/AF/PF are architecturally undefined after IMUL, hence the
 * verification-mode exemption below.
 */
FNIEMOP_DEF(iemOp_imul_Gv_Ev)
{
    IEMOP_MNEMONIC("imul Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_imul_two);
}
5111
5112
/** Opcode 0x0f 0xb0.
 *
 * CMPXCHG Eb,Gb: compares AL with the destination byte and conditionally
 * stores the source register byte; the worker receives AL by reference so
 * it can update it on mismatch.
 */
FNIEMOP_DEF(iemOp_cmpxchg_Eb_Gb)
{
    IEMOP_MNEMONIC("cmpxchg Eb,Gb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination. */
        IEMOP_HLP_DONE_DECODING();
        IEM_MC_BEGIN(4, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t *, pu8Al, 1);
        IEM_MC_ARG(uint8_t, u8Src, 2);
        IEM_MC_ARG(uint32_t *, pEFlags, 3);

        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U8(pu8Al, X86_GREG_xAX);
        IEM_MC_REF_EFLAGS(pEFlags);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u8, pu8Dst, pu8Al, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u8_locked, pu8Dst, pu8Al, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* Memory destination: map it read-write, run the worker on the
           mapped bytes, then commit memory, EFLAGS and AL.  AL is written
           back unconditionally (presumably unchanged when the compare
           succeeded - worker semantics not visible here). */
        IEM_MC_BEGIN(4, 3);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t *, pu8Al, 1);
        IEM_MC_ARG(uint8_t, u8Src, 2);
        IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_LOCAL(uint8_t, u8Al);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DONE_DECODING();
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_GREG_U8(u8Al, X86_GREG_xAX);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_REF_LOCAL(pu8Al, u8Al);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u8, pu8Dst, pu8Al, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u8_locked, pu8Dst, pu8Al, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Al);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
5170
/** Opcode 0x0f 0xb1.
 *
 * CMPXCHG Ev,Gv: word/dword/qword variant of CMPXCHG; compares
 * AX/EAX/RAX with the destination and conditionally stores the source
 * register.  The accumulator is passed by reference so the worker can
 * update it on mismatch.
 */
FNIEMOP_DEF(iemOp_cmpxchg_Ev_Gv)
{
    IEMOP_MNEMONIC("cmpxchg Ev,Gv");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination, one MC block per effective operand size. */
        IEMOP_HLP_DONE_DECODING();
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t *, pu16Ax, 1);
                IEM_MC_ARG(uint16_t, u16Src, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U16(pu16Ax, X86_GREG_xAX);
                IEM_MC_REF_EFLAGS(pEFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u16, pu16Dst, pu16Ax, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u16_locked, pu16Dst, pu16Ax, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t *, pu32Eax, 1);
                IEM_MC_ARG(uint32_t, u32Src, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U32(pu32Eax, X86_GREG_xAX);
                IEM_MC_REF_EFLAGS(pEFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u32, pu32Dst, pu32Eax, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u32_locked, pu32Dst, pu32Eax, u32Src, pEFlags);

                /* 32-bit register writes zero the high halves of the
                   64-bit registers that were written through references. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Eax);
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t *, pu64Rax, 1);
#ifdef RT_ARCH_X86
                /* On 32-bit hosts the assembly worker takes the 64-bit
                   source by reference (can't pass it in a register). */
                IEM_MC_ARG(uint64_t *, pu64Src, 2);
#else
                IEM_MC_ARG(uint64_t, u64Src, 2);
#endif
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U64(pu64Rax, X86_GREG_xAX);
                IEM_MC_REF_EFLAGS(pEFlags);
#ifdef RT_ARCH_X86
                IEM_MC_REF_GREG_U64(pu64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64, pu64Dst, pu64Rax, pu64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64_locked, pu64Dst, pu64Rax, pu64Src, pEFlags);
#else
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64, pu64Dst, pu64Rax, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64_locked, pu64Dst, pu64Rax, u64Src, pEFlags);
#endif

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory destination: map RW, run the worker, then commit the
           memory, EFLAGS and the (possibly updated) accumulator. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 3);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t *, pu16Ax, 1);
                IEM_MC_ARG(uint16_t, u16Src, 2);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_LOCAL(uint16_t, u16Ax);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEMOP_HLP_DONE_DECODING();
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U16(u16Ax, X86_GREG_xAX);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_REF_LOCAL(pu16Ax, u16Ax);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u16, pu16Dst, pu16Ax, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u16_locked, pu16Dst, pu16Ax, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Ax);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 3);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t *, pu32Eax, 1);
                IEM_MC_ARG(uint32_t, u32Src, 2);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_LOCAL(uint32_t, u32Eax);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEMOP_HLP_DONE_DECODING();
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U32(u32Eax, X86_GREG_xAX);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_REF_LOCAL(pu32Eax, u32Eax);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u32, pu32Dst, pu32Eax, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u32_locked, pu32Dst, pu32Eax, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Eax);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 3);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t *, pu64Rax, 1);
#ifdef RT_ARCH_X86
                /* See the register-form note: 32-bit hosts pass the 64-bit
                   source by reference. */
                IEM_MC_ARG(uint64_t *, pu64Src, 2);
#else
                IEM_MC_ARG(uint64_t, u64Src, 2);
#endif
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_LOCAL(uint64_t, u64Rax);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEMOP_HLP_DONE_DECODING();
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_FETCH_GREG_U64(u64Rax, X86_GREG_xAX);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_REF_LOCAL(pu64Rax, u64Rax);
#ifdef RT_ARCH_X86
                IEM_MC_REF_GREG_U64(pu64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64, pu64Dst, pu64Rax, pu64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64_locked, pu64Dst, pu64Rax, pu64Src, pEFlags);
#else
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64, pu64Dst, pu64Rax, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64_locked, pu64Dst, pu64Rax, u64Src, pEFlags);
#endif

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Rax);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
5363
5364
/**
 * Common worker for LSS/LFS/LGS (and similar far-pointer loads): fetches a
 * far pointer (offset followed by a 16-bit selector) from memory and hands
 * it to iemCImpl_load_SReg_Greg to load the segment register and the
 * general register.
 *
 * @param   iSegReg     The segment register to load (X86_SREG_XXX).
 * @param   bRm         The ModR/M byte; must denote a memory operand.
 */
FNIEMOP_DEF_2(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg, uint8_t, bRm)
{
    Assert((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT)); /* Caller checks this */
    uint8_t const iGReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            /* 16-bit offset + selector at +2. */
            IEM_MC_BEGIN(5, 1);
            IEM_MC_ARG(uint16_t, uSel, 0);
            IEM_MC_ARG(uint16_t, offSeg, 1);
            IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
            IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
            IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
            IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 2);
            IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            /* 32-bit offset + selector at +4. */
            IEM_MC_BEGIN(5, 1);
            IEM_MC_ARG(uint16_t, uSel, 0);
            IEM_MC_ARG(uint32_t, offSeg, 1);
            IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
            IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
            IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
            IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 4);
            IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            /* 64-bit offset + selector at +8; AMD CPUs only fetch 32 bits
               of offset per their manual (see todo). */
            IEM_MC_BEGIN(5, 1);
            IEM_MC_ARG(uint16_t, uSel, 0);
            IEM_MC_ARG(uint64_t, offSeg, 1);
            IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
            IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
            IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
            if (IEM_IS_GUEST_CPU_AMD(pIemCpu)) /** @todo testcase: rev 3.15 of the amd manuals claims it only loads a 32-bit greg. */
                IEM_MC_FETCH_MEM_U32_SX_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            else
                IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 8);
            IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
5426
5427
5428/** Opcode 0x0f 0xb2. */
5429FNIEMOP_DEF(iemOp_lss_Gv_Mp)
5430{
5431 IEMOP_MNEMONIC("lss Gv,Mp");
5432 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
5433 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5434 return IEMOP_RAISE_INVALID_OPCODE();
5435 return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_SS, bRm);
5436}
5437
5438
/** Opcode 0x0f 0xb3.
 *
 * BTR Ev,Gv - bit test and reset, via the common bit-op decoder with the
 * btr worker table.
 */
FNIEMOP_DEF(iemOp_btr_Ev_Gv)
{
    IEMOP_MNEMONIC("btr Ev,Gv");
    return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btr);
}
5445
5446
5447/** Opcode 0x0f 0xb4. */
5448FNIEMOP_DEF(iemOp_lfs_Gv_Mp)
5449{
5450 IEMOP_MNEMONIC("lfs Gv,Mp");
5451 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
5452 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5453 return IEMOP_RAISE_INVALID_OPCODE();
5454 return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_FS, bRm);
5455}
5456
5457
5458/** Opcode 0x0f 0xb5. */
5459FNIEMOP_DEF(iemOp_lgs_Gv_Mp)
5460{
5461 IEMOP_MNEMONIC("lgs Gv,Mp");
5462 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
5463 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5464 return IEMOP_RAISE_INVALID_OPCODE();
5465 return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_GS, bRm);
5466}
5467
5468
/** Opcode 0x0f 0xb6.
 *
 * MOVZX Gv,Eb - zero-extend a byte register/memory operand into a
 * 16/32/64-bit general register.
 */
FNIEMOP_DEF(iemOp_movzx_Gv_Eb)
{
    IEMOP_MNEMONIC("movzx Gv,Eb");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U8_ZX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U8_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U8_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_ZX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
5558
5559
/** Opcode 0x0f 0xb7.
 *
 * MOVZX Gv,Ew - zero-extend a word register/memory operand.  Only two
 * destination widths matter: anything but 64-bit mode writes a 32-bit
 * (or effectively 16-bit) destination via the U32 path.
 */
FNIEMOP_DEF(iemOp_movzx_Gv_Ew)
{
    IEMOP_MNEMONIC("movzx Gv,Ew");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /** @todo Not entirely sure how the operand size prefix is handled here,
     *        assuming that it will be ignored. Would be nice to have a few
     *        test for this. */
    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_GREG_U16_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_GREG_U16_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
            IEM_MC_FETCH_MEM_U16_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
            IEM_MC_FETCH_MEM_U16_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    return VINF_SUCCESS;
}
5625
5626
/** Opcode 0x0f 0xb8.
 * POPCNT Gv,Ev (F3 prefix) / JMPE - not implemented yet, plain stub. */
FNIEMOP_STUB(iemOp_popcnt_Gv_Ev_jmpe);
5629
5630
/** Opcode 0x0f 0xb9.
 *
 * Group 10 - deliberately raises \#UD (0x0f 0xb9 is the reserved
 * UD1-style encoding); logged so stray hits show up in traces.
 */
FNIEMOP_DEF(iemOp_Grp10)
{
    Log(("iemOp_Grp10 -> #UD\n"));
    return IEMOP_RAISE_INVALID_OPCODE();
}
5637
5638
5639/** Opcode 0x0f 0xba. */
5640FNIEMOP_DEF(iemOp_Grp8)
5641{
5642 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
5643 PCIEMOPBINSIZES pImpl;
5644 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
5645 {
5646 case 0: case 1: case 2: case 3:
5647 return IEMOP_RAISE_INVALID_OPCODE();
5648 case 4: pImpl = &g_iemAImpl_bt; IEMOP_MNEMONIC("bt Ev,Ib"); break;
5649 case 5: pImpl = &g_iemAImpl_bts; IEMOP_MNEMONIC("bts Ev,Ib"); break;
5650 case 6: pImpl = &g_iemAImpl_btr; IEMOP_MNEMONIC("btr Ev,Ib"); break;
5651 case 7: pImpl = &g_iemAImpl_btc; IEMOP_MNEMONIC("btc Ev,Ib"); break;
5652 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5653 }
5654 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
5655
5656 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5657 {
5658 /* register destination. */
5659 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
5660 IEMOP_HLP_NO_LOCK_PREFIX();
5661
5662 switch (pIemCpu->enmEffOpSize)
5663 {
5664 case IEMMODE_16BIT:
5665 IEM_MC_BEGIN(3, 0);
5666 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5667 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u8Bit & 0x0f, 1);
5668 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5669
5670 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5671 IEM_MC_REF_EFLAGS(pEFlags);
5672 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
5673
5674 IEM_MC_ADVANCE_RIP();
5675 IEM_MC_END();
5676 return VINF_SUCCESS;
5677
5678 case IEMMODE_32BIT:
5679 IEM_MC_BEGIN(3, 0);
5680 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5681 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u8Bit & 0x1f, 1);
5682 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5683
5684 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5685 IEM_MC_REF_EFLAGS(pEFlags);
5686 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
5687
5688 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
5689 IEM_MC_ADVANCE_RIP();
5690 IEM_MC_END();
5691 return VINF_SUCCESS;
5692
5693 case IEMMODE_64BIT:
5694 IEM_MC_BEGIN(3, 0);
5695 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5696 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u8Bit & 0x3f, 1);
5697 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5698
5699 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5700 IEM_MC_REF_EFLAGS(pEFlags);
5701 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
5702
5703 IEM_MC_ADVANCE_RIP();
5704 IEM_MC_END();
5705 return VINF_SUCCESS;
5706
5707 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5708 }
5709 }
5710 else
5711 {
5712 /* memory destination. */
5713
5714 uint32_t fAccess;
5715 if (pImpl->pfnLockedU16)
5716 fAccess = IEM_ACCESS_DATA_RW;
5717 else /* BT */
5718 {
5719 IEMOP_HLP_NO_LOCK_PREFIX();
5720 fAccess = IEM_ACCESS_DATA_R;
5721 }
5722
5723 /** @todo test negative bit offsets! */
5724 switch (pIemCpu->enmEffOpSize)
5725 {
5726 case IEMMODE_16BIT:
5727 IEM_MC_BEGIN(3, 1);
5728 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5729 IEM_MC_ARG(uint16_t, u16Src, 1);
5730 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
5731 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5732
5733 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
5734 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
5735 IEM_MC_ASSIGN(u16Src, u8Bit & 0x0f);
5736 IEM_MC_FETCH_EFLAGS(EFlags);
5737 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
5738 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
5739 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
5740 else
5741 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
5742 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
5743
5744 IEM_MC_COMMIT_EFLAGS(EFlags);
5745 IEM_MC_ADVANCE_RIP();
5746 IEM_MC_END();
5747 return VINF_SUCCESS;
5748
5749 case IEMMODE_32BIT:
5750 IEM_MC_BEGIN(3, 1);
5751 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5752 IEM_MC_ARG(uint32_t, u32Src, 1);
5753 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
5754 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5755
5756 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
5757 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
5758 IEM_MC_ASSIGN(u32Src, u8Bit & 0x1f);
5759 IEM_MC_FETCH_EFLAGS(EFlags);
5760 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
5761 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
5762 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
5763 else
5764 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
5765 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
5766
5767 IEM_MC_COMMIT_EFLAGS(EFlags);
5768 IEM_MC_ADVANCE_RIP();
5769 IEM_MC_END();
5770 return VINF_SUCCESS;
5771
5772 case IEMMODE_64BIT:
5773 IEM_MC_BEGIN(3, 1);
5774 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5775 IEM_MC_ARG(uint64_t, u64Src, 1);
5776 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
5777 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5778
5779 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
5780 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
5781 IEM_MC_ASSIGN(u64Src, u8Bit & 0x3f);
5782 IEM_MC_FETCH_EFLAGS(EFlags);
5783 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
5784 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
5785 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
5786 else
5787 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
5788 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
5789
5790 IEM_MC_COMMIT_EFLAGS(EFlags);
5791 IEM_MC_ADVANCE_RIP();
5792 IEM_MC_END();
5793 return VINF_SUCCESS;
5794
5795 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5796 }
5797 }
5798
5799}
5800
5801
/** Opcode 0x0f 0xbb.
 *
 * BTC Ev,Gv - bit test and complement, via the common bit-op decoder with
 * the btc worker table.
 */
FNIEMOP_DEF(iemOp_btc_Ev_Gv)
{
    IEMOP_MNEMONIC("btc Ev,Gv");
    return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btc);
}
5808
5809
/** Opcode 0x0f 0xbc.
 *
 * BSF Gv,Ev - bit scan forward.  Everything but ZF is architecturally
 * undefined, hence the verification-mode exemption.
 */
FNIEMOP_DEF(iemOp_bsf_Gv_Ev)
{
    IEMOP_MNEMONIC("bsf Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsf);
}
5817
5818
/** Opcode 0x0f 0xbd.
 *
 * BSR Gv,Ev - bit scan reverse.  Everything but ZF is architecturally
 * undefined, hence the verification-mode exemption.
 */
FNIEMOP_DEF(iemOp_bsr_Gv_Ev)
{
    IEMOP_MNEMONIC("bsr Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsr);
}
5826
5827
/** Opcode 0x0f 0xbe.
 *
 * MOVSX Gv,Eb - sign-extend a byte register/memory operand into a
 * 16/32/64-bit general register.  Mirrors iemOp_movzx_Gv_Eb with SX
 * fetches.
 */
FNIEMOP_DEF(iemOp_movsx_Gv_Eb)
{
    IEMOP_MNEMONIC("movsx Gv,Eb");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U8_SX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U8_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U8_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_SX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
5917
5918
/** Opcode 0x0f 0xbf.
 *
 * MOVSX Gv,Ew - sign-extend a word register/memory operand.  Mirrors
 * iemOp_movzx_Gv_Ew with SX fetches.
 */
FNIEMOP_DEF(iemOp_movsx_Gv_Ew)
{
    IEMOP_MNEMONIC("movsx Gv,Ew");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /** @todo Not entirely sure how the operand size prefix is handled here,
     *        assuming that it will be ignored. Would be nice to have a few
     *        test for this. */
    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_GREG_U16_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_GREG_U16_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
            IEM_MC_FETCH_MEM_U16_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
            IEM_MC_FETCH_MEM_U16_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    return VINF_SUCCESS;
}
5984
5985
/** Opcode 0x0f 0xc0. */
FNIEMOP_DEF(iemOp_xadd_Eb_Gb)
{
    /*
     * XADD Eb,Gb - exchange and add: the register operand receives the old
     * destination value and the destination receives the sum.  The actual
     * exchange+add and flag updates are done by the iemAImpl_xadd_u8 worker.
     */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_MNEMONIC("xadd Eb,Gb");

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        /* Register destination: both operands are referenced in place. */
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t *, pu8Reg, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8, pu8Dst, pu8Reg, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         */
        /* The register operand goes via a local copy so its new value (the old
           memory content) is only stored back after the memory write and the
           EFLAGS update have been committed.  LOCK selects the locked worker. */
        IEM_MC_BEGIN(3, 3);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t *, pu8Reg, 1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(uint8_t, u8RegCopy);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U8(u8RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_LOCAL(pu8Reg, u8RegCopy);
        IEM_MC_FETCH_EFLAGS(EFlags);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8, pu8Dst, pu8Reg, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8_locked, pu8Dst, pu8Reg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8RegCopy);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
        return VINF_SUCCESS;
    }
    return VINF_SUCCESS;
}
6043
6044
/** Opcode 0x0f 0xc1. */
FNIEMOP_DEF(iemOp_xadd_Ev_Gv)
{
    /*
     * XADD Ev,Gv - exchange and add for 16/32/64-bit operands, dispatched on
     * the effective operand size.  Same structure as the byte variant above:
     * register forms call the worker on direct register references, memory
     * forms go via a mapped RW memory operand and a local register copy.
     */
    IEMOP_MNEMONIC("xadd Ev,Gv");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t *, pu16Reg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16, pu16Dst, pu16Reg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t *, pu32Reg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags);

                /* 32-bit writes through register references must explicitly
                   clear the high dwords of both 64-bit registers. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Reg);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t *, pu64Reg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64, pu64Dst, pu64Reg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're accessing memory.
         */
        /* As for the byte form: map the destination RW, copy the register
           operand into a local, run the (possibly locked) worker, commit the
           memory and flags, and only then store the old destination value to
           the register. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 3);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t *, pu16Reg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16RegCopy);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U16(u16RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_LOCAL(pu16Reg, u16RegCopy);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16, pu16Dst, pu16Reg, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16_locked, pu16Dst, pu16Reg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16RegCopy);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 3);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t *, pu32Reg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32RegCopy);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U32(u32RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_LOCAL(pu32Reg, u32RegCopy);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32_locked, pu32Dst, pu32Reg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32RegCopy);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 3);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t *, pu64Reg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64RegCopy);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U64(u64RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_LOCAL(pu64Reg, u64RegCopy);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64, pu64Dst, pu64Reg, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64_locked, pu64Dst, pu64Reg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64RegCopy);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
6196
/* Decoding stubs: the 0x0f 0xc2..0xc6 instructions below are not implemented yet. */

/** Opcode 0x0f 0xc2. */
FNIEMOP_STUB(iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib);

/** Opcode 0x0f 0xc3. */
FNIEMOP_STUB(iemOp_movnti_My_Gy);

/** Opcode 0x0f 0xc4. */
FNIEMOP_STUB(iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib);

/** Opcode 0x0f 0xc5. */
FNIEMOP_STUB(iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib);

/** Opcode 0x0f 0xc6. */
FNIEMOP_STUB(iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib);
6211
6212
/** Opcode 0x0f 0xc7 !11/1. */
FNIEMOP_DEF_1(iemOp_Grp9_cmpxchg8b_Mq, uint8_t, bRm)
{
    /*
     * CMPXCHG8B m64: compares EDX:EAX with the memory qword and, if equal,
     * stores ECX:EBX there (ZF set); otherwise the memory value is loaded
     * into EDX:EAX (ZF clear).  The compare/exchange itself is done by the
     * iemAImpl_cmpxchg8b worker; this decoder just marshals the register
     * pairs and commits the results.
     */
    IEMOP_MNEMONIC("cmpxchg8b Mq");

    IEM_MC_BEGIN(4, 3);
    IEM_MC_ARG(uint64_t *, pu64MemDst, 0);
    IEM_MC_ARG(PRTUINT64U, pu64EaxEdx, 1);
    IEM_MC_ARG(PRTUINT64U, pu64EbxEcx, 2);
    IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 3);
    IEM_MC_LOCAL(RTUINT64U, u64EaxEdx);
    IEM_MC_LOCAL(RTUINT64U, u64EbxEcx);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING();
    IEM_MC_MEM_MAP(pu64MemDst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);

    /* Gather EDX:EAX and ECX:EBX into 64-bit locals for the worker. */
    IEM_MC_FETCH_GREG_U32(u64EaxEdx.s.Lo, X86_GREG_xAX);
    IEM_MC_FETCH_GREG_U32(u64EaxEdx.s.Hi, X86_GREG_xDX);
    IEM_MC_REF_LOCAL(pu64EaxEdx, u64EaxEdx);

    IEM_MC_FETCH_GREG_U32(u64EbxEcx.s.Lo, X86_GREG_xBX);
    IEM_MC_FETCH_GREG_U32(u64EbxEcx.s.Hi, X86_GREG_xCX);
    IEM_MC_REF_LOCAL(pu64EbxEcx, u64EbxEcx);

    IEM_MC_FETCH_EFLAGS(EFlags);
    if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
        IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg8b, pu64MemDst, pu64EaxEdx, pu64EbxEcx, pEFlags);
    else
        IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg8b_locked, pu64MemDst, pu64EaxEdx, pu64EbxEcx, pEFlags);

    IEM_MC_MEM_COMMIT_AND_UNMAP(pu64MemDst, IEM_ACCESS_DATA_RW);
    IEM_MC_COMMIT_EFLAGS(EFlags);
    /* If the compare failed (ZF clear), write u64EaxEdx back to EAX/EDX -
       presumably the worker updated it with the memory value (see
       iemAImpl_cmpxchg8b; not visible in this file). */
    IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF)
        /** @todo Testcase: Check effect of cmpxchg8b on bits 63:32 in rax and rdx. */
        IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u64EaxEdx.s.Lo);
        IEM_MC_STORE_GREG_U32(X86_GREG_xDX, u64EaxEdx.s.Hi);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
6257
6258
/* Group 9 (0x0f 0xc7) sub-opcode stubs: cmpxchg16b, rdrand and the VMX
   pointer instructions currently raise \#UD (per the FNIEMOP_UD_STUB_1
   macro name). */

/** Opcode REX.W 0x0f 0xc7 !11/1. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_cmpxchg16b_Mdq, uint8_t, bRm);

/** Opcode 0x0f 0xc7 11/6. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_rdrand_Rv, uint8_t, bRm);

/** Opcode 0x0f 0xc7 !11/6. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_vmptrld_Mq, uint8_t, bRm);

/** Opcode 0x66 0x0f 0xc7 !11/6. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_vmclear_Mq, uint8_t, bRm);

/** Opcode 0xf3 0x0f 0xc7 !11/6. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_vmxon_Mq, uint8_t, bRm);

/** Opcode [0xf3] 0x0f 0xc7 !11/7. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_vmptrst_Mq, uint8_t, bRm);
6276
6277
6278/** Opcode 0x0f 0xc7. */
6279FNIEMOP_DEF(iemOp_Grp9)
6280{
6281 /** @todo Testcase: Check mixing 0x66 and 0xf3. Check the effect of 0xf2. */
6282 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6283 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
6284 {
6285 case 0: case 2: case 3: case 4: case 5:
6286 return IEMOP_RAISE_INVALID_OPCODE();
6287 case 1:
6288 /** @todo Testcase: Check prefix effects on cmpxchg8b/16b. */
6289 if ( (bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)
6290 || (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ))) /** @todo Testcase: AMD seems to express a different idea here wrt prefixes. */
6291 return IEMOP_RAISE_INVALID_OPCODE();
6292 if (bRm & IEM_OP_PRF_SIZE_REX_W)
6293 return FNIEMOP_CALL_1(iemOp_Grp9_cmpxchg16b_Mdq, bRm);
6294 return FNIEMOP_CALL_1(iemOp_Grp9_cmpxchg8b_Mq, bRm);
6295 case 6:
6296 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6297 return FNIEMOP_CALL_1(iemOp_Grp9_rdrand_Rv, bRm);
6298 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ))
6299 {
6300 case 0:
6301 return FNIEMOP_CALL_1(iemOp_Grp9_vmptrld_Mq, bRm);
6302 case IEM_OP_PRF_SIZE_OP:
6303 return FNIEMOP_CALL_1(iemOp_Grp9_vmclear_Mq, bRm);
6304 case IEM_OP_PRF_REPZ:
6305 return FNIEMOP_CALL_1(iemOp_Grp9_vmxon_Mq, bRm);
6306 default:
6307 return IEMOP_RAISE_INVALID_OPCODE();
6308 }
6309 case 7:
6310 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ))
6311 {
6312 case 0:
6313 case IEM_OP_PRF_REPZ:
6314 return FNIEMOP_CALL_1(iemOp_Grp9_vmptrst_Mq, bRm);
6315 default:
6316 return IEMOP_RAISE_INVALID_OPCODE();
6317 }
6318 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6319 }
6320}
6321
6322
/**
 * Common 'bswap register' helper.
 *
 * Byte swaps the general purpose register given by @a iReg according to the
 * current effective operand size.
 *
 * @param   iReg    The register index, including any REX.B extension.
 */
FNIEMOP_DEF_1(iemOpCommonBswapGReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            /* BSWAP with a 16-bit operand has undefined results per the Intel
               SDM; what this produces is decided by the iemAImpl_bswap_u16
               assembly worker (not visible in this file). */
            IEM_MC_BEGIN(1, 0);
            IEM_MC_ARG(uint32_t *, pu32Dst, 0);
            IEM_MC_REF_GREG_U32(pu32Dst, iReg); /* Don't clear the high dword! */
            IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_bswap_u16, pu32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            /* The 32-bit form explicitly clears the high dword before the
               worker runs (64-bit mode register write semantics). */
            IEM_MC_BEGIN(1, 0);
            IEM_MC_ARG(uint32_t *, pu32Dst, 0);
            IEM_MC_REF_GREG_U32(pu32Dst, iReg);
            IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
            IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_bswap_u32, pu32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(1, 0);
            IEM_MC_ARG(uint64_t *, pu64Dst, 0);
            IEM_MC_REF_GREG_U64(pu64Dst, iReg);
            IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_bswap_u64, pu64Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
6362
6363
/** Opcode 0x0f 0xc8. */
FNIEMOP_DEF(iemOp_bswap_rAX_r8)
{
    IEMOP_MNEMONIC("bswap rAX/r8");
    /* Note! Intel manuals states that R8-R15 can be accessed by using a REX.X
       prefix. REX.B is the correct prefix it appears. For a parallel
       case, see iemOp_mov_AL_Ib and iemOp_mov_eAX_Iv. */
    /* Dispatch to the common bswap worker with the REX.B-extended index. */
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xAX | pIemCpu->uRexB);
}
6373
6374
/** Opcode 0x0f 0xc9. */
FNIEMOP_DEF(iemOp_bswap_rCX_r9)
{
    IEMOP_MNEMONIC("bswap rCX/r9");
    /* Dispatch to the common bswap worker with the REX.B-extended index. */
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xCX | pIemCpu->uRexB);
}
6381
6382
6383/** Opcode 0x0f 0xca. */
6384FNIEMOP_DEF(iemOp_bswap_rDX_r10)
6385{
6386 IEMOP_MNEMONIC("bswap rDX/r9");
6387 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDX | pIemCpu->uRexB);
6388}
6389
6390
6391/** Opcode 0x0f 0xcb. */
6392FNIEMOP_DEF(iemOp_bswap_rBX_r11)
6393{
6394 IEMOP_MNEMONIC("bswap rBX/r9");
6395 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBX | pIemCpu->uRexB);
6396}
6397
6398
/** Opcode 0x0f 0xcc. */
FNIEMOP_DEF(iemOp_bswap_rSP_r12)
{
    IEMOP_MNEMONIC("bswap rSP/r12");
    /* Dispatch to the common bswap worker with the REX.B-extended index. */
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSP | pIemCpu->uRexB);
}
6405
6406
/** Opcode 0x0f 0xcd. */
FNIEMOP_DEF(iemOp_bswap_rBP_r13)
{
    IEMOP_MNEMONIC("bswap rBP/r13");
    /* Dispatch to the common bswap worker with the REX.B-extended index. */
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBP | pIemCpu->uRexB);
}
6413
6414
/** Opcode 0x0f 0xce. */
FNIEMOP_DEF(iemOp_bswap_rSI_r14)
{
    IEMOP_MNEMONIC("bswap rSI/r14");
    /* Dispatch to the common bswap worker with the REX.B-extended index. */
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSI | pIemCpu->uRexB);
}
6421
6422
/** Opcode 0x0f 0xcf. */
FNIEMOP_DEF(iemOp_bswap_rDI_r15)
{
    IEMOP_MNEMONIC("bswap rDI/r15");
    /* Dispatch to the common bswap worker with the REX.B-extended index. */
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDI | pIemCpu->uRexB);
}
6429
6430
6431
/* Decoding stubs: the 0x0f 0xd0..0xd6 MMX/SSE instructions below are not
   implemented yet. */
/** Opcode 0x0f 0xd0. */
FNIEMOP_STUB(iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps);
/** Opcode 0x0f 0xd1. */
FNIEMOP_STUB(iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq);
/** Opcode 0x0f 0xd2. */
FNIEMOP_STUB(iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq);
/** Opcode 0x0f 0xd3. */
FNIEMOP_STUB(iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq);
/** Opcode 0x0f 0xd4. */
FNIEMOP_STUB(iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq);
/** Opcode 0x0f 0xd5. */
FNIEMOP_STUB(iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq);
/** Opcode 0x0f 0xd6. */
FNIEMOP_STUB(iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq);
6446
6447
6448/** Opcode 0x0f 0xd7. */
6449FNIEMOP_DEF(iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq)
6450{
6451 /* Docs says register only. */
6452 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6453 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT)) /** @todo test that this is registers only. */
6454 return IEMOP_RAISE_INVALID_OPCODE();
6455
6456 /* Note! Taking the lazy approch here wrt the high 32-bits of the GREG. */
6457 /** @todo testcase: Check that the instruction implicitly clears the high
6458 * bits in 64-bit mode. The REX.W is first necessary when VLMAX > 256
6459 * and opcode modifications are made to work with the whole width (not
6460 * just 128). */
6461 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
6462 {
6463 case IEM_OP_PRF_SIZE_OP: /* SSE */
6464 IEMOP_MNEMONIC("pmovmskb Gd,Nq");
6465 IEMOP_HLP_DECODED_NL_2(OP_PMOVMSKB, IEMOPFORM_RM_REG, OP_PARM_Gd, OP_PARM_Vdq, DISOPTYPE_SSE | DISOPTYPE_HARMLESS);
6466 IEM_MC_BEGIN(2, 0);
6467 IEM_MC_ARG(uint64_t *, pDst, 0);
6468 IEM_MC_ARG(uint128_t const *, pSrc, 1);
6469 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
6470 IEM_MC_REF_GREG_U64(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6471 IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6472 IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pmovmskb_u128, pDst, pSrc);
6473 IEM_MC_ADVANCE_RIP();
6474 IEM_MC_END();
6475 return VINF_SUCCESS;
6476
6477 case 0: /* MMX */
6478 IEMOP_MNEMONIC("pmovmskb Gd,Udq");
6479 IEMOP_HLP_DECODED_NL_2(OP_PMOVMSKB, IEMOPFORM_RM_REG, OP_PARM_Gd, OP_PARM_Vdq, DISOPTYPE_MMX | DISOPTYPE_HARMLESS);
6480 IEM_MC_BEGIN(2, 0);
6481 IEM_MC_ARG(uint64_t *, pDst, 0);
6482 IEM_MC_ARG(uint64_t const *, pSrc, 1);
6483 IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT();
6484 IEM_MC_REF_GREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
6485 IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
6486 IEM_MC_CALL_MMX_AIMPL_2(iemAImpl_pmovmskb_u64, pDst, pSrc);
6487 IEM_MC_ADVANCE_RIP();
6488 IEM_MC_END();
6489 return VINF_SUCCESS;
6490
6491 default:
6492 return IEMOP_RAISE_INVALID_OPCODE();
6493 }
6494}
6495
6496
/* Decoding stubs: the 0x0f 0xd8..0xee MMX/SSE instructions below are not
   implemented yet. */
/** Opcode 0x0f 0xd8. */
FNIEMOP_STUB(iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq);
/** Opcode 0x0f 0xd9. */
FNIEMOP_STUB(iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq);
/** Opcode 0x0f 0xda. */
FNIEMOP_STUB(iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq);
/** Opcode 0x0f 0xdb. */
FNIEMOP_STUB(iemOp_pand_Pq_Qq__pand_Vdq_Wdq);
/** Opcode 0x0f 0xdc. */
FNIEMOP_STUB(iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq);
/** Opcode 0x0f 0xdd. */
FNIEMOP_STUB(iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq);
/** Opcode 0x0f 0xde. */
FNIEMOP_STUB(iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq);
/** Opcode 0x0f 0xdf. */
FNIEMOP_STUB(iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq);
/** Opcode 0x0f 0xe0. */
FNIEMOP_STUB(iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq);
/** Opcode 0x0f 0xe1. */
FNIEMOP_STUB(iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq);
/** Opcode 0x0f 0xe2. */
FNIEMOP_STUB(iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq);
/** Opcode 0x0f 0xe3. */
FNIEMOP_STUB(iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq);
/** Opcode 0x0f 0xe4. */
FNIEMOP_STUB(iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq);
/** Opcode 0x0f 0xe5. */
FNIEMOP_STUB(iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq);
/** Opcode 0x0f 0xe6. */
FNIEMOP_STUB(iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd);
/** Opcode 0x0f 0xe7. */
FNIEMOP_STUB(iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq);
/** Opcode 0x0f 0xe8. */
FNIEMOP_STUB(iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq);
/** Opcode 0x0f 0xe9. */
FNIEMOP_STUB(iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq);
/** Opcode 0x0f 0xea. */
FNIEMOP_STUB(iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq);
/** Opcode 0x0f 0xeb. */
FNIEMOP_STUB(iemOp_por_Pq_Qq__por_Vdq_Wdq);
/** Opcode 0x0f 0xec. */
FNIEMOP_STUB(iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq);
/** Opcode 0x0f 0xed. */
FNIEMOP_STUB(iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq);
/** Opcode 0x0f 0xee. */
FNIEMOP_STUB(iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq);
6543
6544
/** Opcode 0x0f 0xef. */
FNIEMOP_DEF(iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq)
{
    IEMOP_MNEMONIC("pxor");
    /* Defers to the common MMX/SSE2 full,full->full worker using the pxor
       implementation table. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pxor);
}
6551
6552
/* Decoding stubs: the 0x0f 0xf0..0xfe MMX/SSE instructions below are not
   implemented yet. */
/** Opcode 0x0f 0xf0. */
FNIEMOP_STUB(iemOp_lddqu_Vdq_Mdq);
/** Opcode 0x0f 0xf1. */
FNIEMOP_STUB(iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq);
/** Opcode 0x0f 0xf2. */
FNIEMOP_STUB(iemOp_psld_Pq_Qq__pslld_Vdq_Wdq);
/** Opcode 0x0f 0xf3. */
FNIEMOP_STUB(iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq);
/** Opcode 0x0f 0xf4. */
FNIEMOP_STUB(iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq);
/** Opcode 0x0f 0xf5. */
FNIEMOP_STUB(iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq);
/** Opcode 0x0f 0xf6. */
FNIEMOP_STUB(iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq);
/** Opcode 0x0f 0xf7. */
FNIEMOP_STUB(iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq);
/** Opcode 0x0f 0xf8. */
FNIEMOP_STUB(iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq); //NEXT
/** Opcode 0x0f 0xf9. */
FNIEMOP_STUB(iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq);
/** Opcode 0x0f 0xfa. */
FNIEMOP_STUB(iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq);
/** Opcode 0x0f 0xfb. */
FNIEMOP_STUB(iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq);
/** Opcode 0x0f 0xfc. */
FNIEMOP_STUB(iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq);
/** Opcode 0x0f 0xfd. */
FNIEMOP_STUB(iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq);
/** Opcode 0x0f 0xfe. */
FNIEMOP_STUB(iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq);
6583
6584
/** The two byte (0x0f prefixed) opcode dispatch table, indexed by the second
 *  opcode byte.  Entry-index comments below are positional: the entry for
 *  0xbb (btc) was previously mislabeled as 0xbd. */
const PFNIEMOP g_apfnTwoByteMap[256] =
{
    /* 0x00 */ iemOp_Grp6,
    /* 0x01 */ iemOp_Grp7,
    /* 0x02 */ iemOp_lar_Gv_Ew,
    /* 0x03 */ iemOp_lsl_Gv_Ew,
    /* 0x04 */ iemOp_Invalid,
    /* 0x05 */ iemOp_syscall,
    /* 0x06 */ iemOp_clts,
    /* 0x07 */ iemOp_sysret,
    /* 0x08 */ iemOp_invd,
    /* 0x09 */ iemOp_wbinvd,
    /* 0x0a */ iemOp_Invalid,
    /* 0x0b */ iemOp_ud2,
    /* 0x0c */ iemOp_Invalid,
    /* 0x0d */ iemOp_nop_Ev_GrpP,
    /* 0x0e */ iemOp_femms,
    /* 0x0f */ iemOp_3Dnow,
    /* 0x10 */ iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd,
    /* 0x11 */ iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd,
    /* 0x12 */ iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq,
    /* 0x13 */ iemOp_movlps_Mq_Vq__movlpd_Mq_Vq,
    /* 0x14 */ iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq,
    /* 0x15 */ iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq,
    /* 0x16 */ iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq,
    /* 0x17 */ iemOp_movhps_Mq_Vq__movhpd_Mq_Vq,
    /* 0x18 */ iemOp_prefetch_Grp16,
    /* 0x19 */ iemOp_nop_Ev,
    /* 0x1a */ iemOp_nop_Ev,
    /* 0x1b */ iemOp_nop_Ev,
    /* 0x1c */ iemOp_nop_Ev,
    /* 0x1d */ iemOp_nop_Ev,
    /* 0x1e */ iemOp_nop_Ev,
    /* 0x1f */ iemOp_nop_Ev,
    /* 0x20 */ iemOp_mov_Rd_Cd,
    /* 0x21 */ iemOp_mov_Rd_Dd,
    /* 0x22 */ iemOp_mov_Cd_Rd,
    /* 0x23 */ iemOp_mov_Dd_Rd,
    /* 0x24 */ iemOp_mov_Rd_Td,
    /* 0x25 */ iemOp_Invalid,
    /* 0x26 */ iemOp_mov_Td_Rd,
    /* 0x27 */ iemOp_Invalid,
    /* 0x28 */ iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd,
    /* 0x29 */ iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd,
    /* 0x2a */ iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey,
    /* 0x2b */ iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd,
    /* 0x2c */ iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd,
    /* 0x2d */ iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd,
    /* 0x2e */ iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd,
    /* 0x2f */ iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd,
    /* 0x30 */ iemOp_wrmsr,
    /* 0x31 */ iemOp_rdtsc,
    /* 0x32 */ iemOp_rdmsr,
    /* 0x33 */ iemOp_rdpmc,
    /* 0x34 */ iemOp_sysenter,
    /* 0x35 */ iemOp_sysexit,
    /* 0x36 */ iemOp_Invalid,
    /* 0x37 */ iemOp_getsec,
    /* 0x38 */ iemOp_3byte_Esc_A4,
    /* 0x39 */ iemOp_Invalid,
    /* 0x3a */ iemOp_3byte_Esc_A5,
    /* 0x3b */ iemOp_Invalid,
    /* 0x3c */ iemOp_movnti_Gv_Ev/*??*/,
    /* 0x3d */ iemOp_Invalid,
    /* 0x3e */ iemOp_Invalid,
    /* 0x3f */ iemOp_Invalid,
    /* 0x40 */ iemOp_cmovo_Gv_Ev,
    /* 0x41 */ iemOp_cmovno_Gv_Ev,
    /* 0x42 */ iemOp_cmovc_Gv_Ev,
    /* 0x43 */ iemOp_cmovnc_Gv_Ev,
    /* 0x44 */ iemOp_cmove_Gv_Ev,
    /* 0x45 */ iemOp_cmovne_Gv_Ev,
    /* 0x46 */ iemOp_cmovbe_Gv_Ev,
    /* 0x47 */ iemOp_cmovnbe_Gv_Ev,
    /* 0x48 */ iemOp_cmovs_Gv_Ev,
    /* 0x49 */ iemOp_cmovns_Gv_Ev,
    /* 0x4a */ iemOp_cmovp_Gv_Ev,
    /* 0x4b */ iemOp_cmovnp_Gv_Ev,
    /* 0x4c */ iemOp_cmovl_Gv_Ev,
    /* 0x4d */ iemOp_cmovnl_Gv_Ev,
    /* 0x4e */ iemOp_cmovle_Gv_Ev,
    /* 0x4f */ iemOp_cmovnle_Gv_Ev,
    /* 0x50 */ iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd,
    /* 0x51 */ iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd,
    /* 0x52 */ iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss,
    /* 0x53 */ iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss,
    /* 0x54 */ iemOp_andps_Vps_Wps__andpd_Wpd_Vpd,
    /* 0x55 */ iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd,
    /* 0x56 */ iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd,
    /* 0x57 */ iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd,
    /* 0x58 */ iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd,
    /* 0x59 */ iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd,
    /* 0x5a */ iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd,
    /* 0x5b */ iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps,
    /* 0x5c */ iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd,
    /* 0x5d */ iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd,
    /* 0x5e */ iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd,
    /* 0x5f */ iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd,
    /* 0x60 */ iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq,
    /* 0x61 */ iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq,
    /* 0x62 */ iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq,
    /* 0x63 */ iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq,
    /* 0x64 */ iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq,
    /* 0x65 */ iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq,
    /* 0x66 */ iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq,
    /* 0x67 */ iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq,
    /* 0x68 */ iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq,
    /* 0x69 */ iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq,
    /* 0x6a */ iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq,
    /* 0x6b */ iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq,
    /* 0x6c */ iemOp_punpcklqdq_Vdq_Wdq,
    /* 0x6d */ iemOp_punpckhqdq_Vdq_Wdq,
    /* 0x6e */ iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey,
    /* 0x6f */ iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq,
    /* 0x70 */ iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib,
    /* 0x71 */ iemOp_Grp12,
    /* 0x72 */ iemOp_Grp13,
    /* 0x73 */ iemOp_Grp14,
    /* 0x74 */ iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq,
    /* 0x75 */ iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq,
    /* 0x76 */ iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq,
    /* 0x77 */ iemOp_emms,
    /* 0x78 */ iemOp_vmread_AmdGrp17,
    /* 0x79 */ iemOp_vmwrite,
    /* 0x7a */ iemOp_Invalid,
    /* 0x7b */ iemOp_Invalid,
    /* 0x7c */ iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps,
    /* 0x7d */ iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps,
    /* 0x7e */ iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq,
    /* 0x7f */ iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq,
    /* 0x80 */ iemOp_jo_Jv,
    /* 0x81 */ iemOp_jno_Jv,
    /* 0x82 */ iemOp_jc_Jv,
    /* 0x83 */ iemOp_jnc_Jv,
    /* 0x84 */ iemOp_je_Jv,
    /* 0x85 */ iemOp_jne_Jv,
    /* 0x86 */ iemOp_jbe_Jv,
    /* 0x87 */ iemOp_jnbe_Jv,
    /* 0x88 */ iemOp_js_Jv,
    /* 0x89 */ iemOp_jns_Jv,
    /* 0x8a */ iemOp_jp_Jv,
    /* 0x8b */ iemOp_jnp_Jv,
    /* 0x8c */ iemOp_jl_Jv,
    /* 0x8d */ iemOp_jnl_Jv,
    /* 0x8e */ iemOp_jle_Jv,
    /* 0x8f */ iemOp_jnle_Jv,
    /* 0x90 */ iemOp_seto_Eb,
    /* 0x91 */ iemOp_setno_Eb,
    /* 0x92 */ iemOp_setc_Eb,
    /* 0x93 */ iemOp_setnc_Eb,
    /* 0x94 */ iemOp_sete_Eb,
    /* 0x95 */ iemOp_setne_Eb,
    /* 0x96 */ iemOp_setbe_Eb,
    /* 0x97 */ iemOp_setnbe_Eb,
    /* 0x98 */ iemOp_sets_Eb,
    /* 0x99 */ iemOp_setns_Eb,
    /* 0x9a */ iemOp_setp_Eb,
    /* 0x9b */ iemOp_setnp_Eb,
    /* 0x9c */ iemOp_setl_Eb,
    /* 0x9d */ iemOp_setnl_Eb,
    /* 0x9e */ iemOp_setle_Eb,
    /* 0x9f */ iemOp_setnle_Eb,
    /* 0xa0 */ iemOp_push_fs,
    /* 0xa1 */ iemOp_pop_fs,
    /* 0xa2 */ iemOp_cpuid,
    /* 0xa3 */ iemOp_bt_Ev_Gv,
    /* 0xa4 */ iemOp_shld_Ev_Gv_Ib,
    /* 0xa5 */ iemOp_shld_Ev_Gv_CL,
    /* 0xa6 */ iemOp_Invalid,
    /* 0xa7 */ iemOp_Invalid,
    /* 0xa8 */ iemOp_push_gs,
    /* 0xa9 */ iemOp_pop_gs,
    /* 0xaa */ iemOp_rsm,
    /* 0xab */ iemOp_bts_Ev_Gv,
    /* 0xac */ iemOp_shrd_Ev_Gv_Ib,
    /* 0xad */ iemOp_shrd_Ev_Gv_CL,
    /* 0xae */ iemOp_Grp15,
    /* 0xaf */ iemOp_imul_Gv_Ev,
    /* 0xb0 */ iemOp_cmpxchg_Eb_Gb,
    /* 0xb1 */ iemOp_cmpxchg_Ev_Gv,
    /* 0xb2 */ iemOp_lss_Gv_Mp,
    /* 0xb3 */ iemOp_btr_Ev_Gv,
    /* 0xb4 */ iemOp_lfs_Gv_Mp,
    /* 0xb5 */ iemOp_lgs_Gv_Mp,
    /* 0xb6 */ iemOp_movzx_Gv_Eb,
    /* 0xb7 */ iemOp_movzx_Gv_Ew,
    /* 0xb8 */ iemOp_popcnt_Gv_Ev_jmpe,
    /* 0xb9 */ iemOp_Grp10,
    /* 0xba */ iemOp_Grp8,
    /* 0xbb */ iemOp_btc_Ev_Gv,
    /* 0xbc */ iemOp_bsf_Gv_Ev,
    /* 0xbd */ iemOp_bsr_Gv_Ev,
    /* 0xbe */ iemOp_movsx_Gv_Eb,
    /* 0xbf */ iemOp_movsx_Gv_Ew,
    /* 0xc0 */ iemOp_xadd_Eb_Gb,
    /* 0xc1 */ iemOp_xadd_Ev_Gv,
    /* 0xc2 */ iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib,
    /* 0xc3 */ iemOp_movnti_My_Gy,
    /* 0xc4 */ iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib,
    /* 0xc5 */ iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib,
    /* 0xc6 */ iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib,
    /* 0xc7 */ iemOp_Grp9,
    /* 0xc8 */ iemOp_bswap_rAX_r8,
    /* 0xc9 */ iemOp_bswap_rCX_r9,
    /* 0xca */ iemOp_bswap_rDX_r10,
    /* 0xcb */ iemOp_bswap_rBX_r11,
    /* 0xcc */ iemOp_bswap_rSP_r12,
    /* 0xcd */ iemOp_bswap_rBP_r13,
    /* 0xce */ iemOp_bswap_rSI_r14,
    /* 0xcf */ iemOp_bswap_rDI_r15,
    /* 0xd0 */ iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps,
    /* 0xd1 */ iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq,
    /* 0xd2 */ iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq,
    /* 0xd3 */ iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq,
    /* 0xd4 */ iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq,
    /* 0xd5 */ iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq,
    /* 0xd6 */ iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq,
    /* 0xd7 */ iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq,
    /* 0xd8 */ iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq,
    /* 0xd9 */ iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq,
    /* 0xda */ iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq,
    /* 0xdb */ iemOp_pand_Pq_Qq__pand_Vdq_Wdq,
    /* 0xdc */ iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq,
    /* 0xdd */ iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq,
    /* 0xde */ iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq,
    /* 0xdf */ iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq,
    /* 0xe0 */ iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq,
    /* 0xe1 */ iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq,
    /* 0xe2 */ iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq,
    /* 0xe3 */ iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq,
    /* 0xe4 */ iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq,
    /* 0xe5 */ iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq,
    /* 0xe6 */ iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd,
    /* 0xe7 */ iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq,
    /* 0xe8 */ iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq,
    /* 0xe9 */ iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq,
    /* 0xea */ iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq,
    /* 0xeb */ iemOp_por_Pq_Qq__por_Vdq_Wdq,
    /* 0xec */ iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq,
    /* 0xed */ iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq,
    /* 0xee */ iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq,
    /* 0xef */ iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq,
    /* 0xf0 */ iemOp_lddqu_Vdq_Mdq,
    /* 0xf1 */ iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq,
    /* 0xf2 */ iemOp_psld_Pq_Qq__pslld_Vdq_Wdq,
    /* 0xf3 */ iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq,
    /* 0xf4 */ iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq,
    /* 0xf5 */ iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq,
    /* 0xf6 */ iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq,
    /* 0xf7 */ iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq,
    /* 0xf8 */ iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq,
    /* 0xf9 */ iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq,
    /* 0xfa */ iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq,
    /* 0xfb */ iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq,
    /* 0xfc */ iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq,
    /* 0xfd */ iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq,
    /* 0xfe */ iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq,
    /* 0xff */ iemOp_Invalid
};
6844
6845/** @} */
6846
6847
6848/** @name One byte opcodes.
6849 *
6850 * @{
6851 */
6852
/** Opcode 0x00 - ADD Eb,Gb: byte add, memory/register (r/m) destination. */
FNIEMOP_DEF(iemOp_add_Eb_Gb)
{
    IEMOP_MNEMONIC("add Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_add);
}
6859
6860
/** Opcode 0x01 - ADD Ev,Gv: word/dword/qword add, r/m destination. */
FNIEMOP_DEF(iemOp_add_Ev_Gv)
{
    IEMOP_MNEMONIC("add Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_add);
}
6867
6868
/** Opcode 0x02 - ADD Gb,Eb: byte add, register destination. */
FNIEMOP_DEF(iemOp_add_Gb_Eb)
{
    IEMOP_MNEMONIC("add Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_add);
}
6875
6876
/** Opcode 0x03 - ADD Gv,Ev: word/dword/qword add, register destination. */
FNIEMOP_DEF(iemOp_add_Gv_Ev)
{
    IEMOP_MNEMONIC("add Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_add);
}
6883
6884
/** Opcode 0x04 - ADD AL,Ib: add immediate byte to AL. */
FNIEMOP_DEF(iemOp_add_Al_Ib)
{
    IEMOP_MNEMONIC("add al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_add);
}
6891
6892
/** Opcode 0x05 - ADD rAX,Iz: add immediate (word/dword, sign-extended for 64-bit) to rAX. */
FNIEMOP_DEF(iemOp_add_eAX_Iz)
{
    IEMOP_MNEMONIC("add rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_add);
}
6899
6900
/** Opcode 0x06 - PUSH ES segment register. */
FNIEMOP_DEF(iemOp_push_ES)
{
    IEMOP_MNEMONIC("push es");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_ES);
}
6907
6908
/** Opcode 0x07 - POP ES; invalid in 64-bit mode, deferred to the C implementation. */
FNIEMOP_DEF(iemOp_pop_ES)
{
    IEMOP_MNEMONIC("pop es");
    IEMOP_HLP_NO_64BIT();
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_ES, pIemCpu->enmEffOpSize);
}
6917
6918
/** Opcode 0x08 - OR Eb,Gb: byte bitwise OR, r/m destination (AF undefined). */
FNIEMOP_DEF(iemOp_or_Eb_Gb)
{
    IEMOP_MNEMONIC("or  Eb,Gb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_or);
}
6926
6927
6928/** Opcode 0x09. */
6929FNIEMOP_DEF(iemOp_or_Ev_Gv)
6930{
6931 IEMOP_MNEMONIC("or Ev,Gv ");
6932 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
6933 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_or);
6934}
6935
6936
/** Opcode 0x0a - OR Gb,Eb: byte bitwise OR, register destination (AF undefined). */
FNIEMOP_DEF(iemOp_or_Gb_Eb)
{
    IEMOP_MNEMONIC("or  Gb,Eb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_or);
}
6944
6945
/** Opcode 0x0b - OR Gv,Ev: word/dword/qword bitwise OR, register destination (AF undefined). */
FNIEMOP_DEF(iemOp_or_Gv_Ev)
{
    IEMOP_MNEMONIC("or  Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_or);
}
6953
6954
/** Opcode 0x0c - OR AL,Ib: OR immediate byte into AL (AF undefined). */
FNIEMOP_DEF(iemOp_or_Al_Ib)
{
    IEMOP_MNEMONIC("or  al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_or);
}
6962
6963
/** Opcode 0x0d - OR rAX,Iz: OR immediate into rAX (AF undefined). */
FNIEMOP_DEF(iemOp_or_eAX_Iz)
{
    IEMOP_MNEMONIC("or  rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_or);
}
6971
6972
/** Opcode 0x0e - PUSH CS segment register. */
FNIEMOP_DEF(iemOp_push_CS)
{
    IEMOP_MNEMONIC("push cs");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_CS);
}
6979
6980
/** Opcode 0x0f - two-byte opcode escape: fetch the next byte and dispatch via the 0x0f map. */
FNIEMOP_DEF(iemOp_2byteEscape)
{
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnTwoByteMap[b]);
}
6987
/** Opcode 0x10 - ADC Eb,Gb: byte add-with-carry, r/m destination. */
FNIEMOP_DEF(iemOp_adc_Eb_Gb)
{
    IEMOP_MNEMONIC("adc Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_adc);
}
6994
6995
/** Opcode 0x11 - ADC Ev,Gv: word/dword/qword add-with-carry, r/m destination. */
FNIEMOP_DEF(iemOp_adc_Ev_Gv)
{
    IEMOP_MNEMONIC("adc Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_adc);
}
7002
7003
/** Opcode 0x12 - ADC Gb,Eb: byte add-with-carry, register destination. */
FNIEMOP_DEF(iemOp_adc_Gb_Eb)
{
    IEMOP_MNEMONIC("adc Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_adc);
}
7010
7011
/** Opcode 0x13 - ADC Gv,Ev: word/dword/qword add-with-carry, register destination. */
FNIEMOP_DEF(iemOp_adc_Gv_Ev)
{
    IEMOP_MNEMONIC("adc Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_adc);
}
7018
7019
/** Opcode 0x14 - ADC AL,Ib: add immediate byte plus carry to AL. */
FNIEMOP_DEF(iemOp_adc_Al_Ib)
{
    IEMOP_MNEMONIC("adc al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_adc);
}
7026
7027
/** Opcode 0x15 - ADC rAX,Iz: add immediate plus carry to rAX. */
FNIEMOP_DEF(iemOp_adc_eAX_Iz)
{
    IEMOP_MNEMONIC("adc rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_adc);
}
7034
7035
/** Opcode 0x16 - PUSH SS segment register. */
FNIEMOP_DEF(iemOp_push_SS)
{
    IEMOP_MNEMONIC("push ss");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_SS);
}
7042
7043
/** Opcode 0x17 - POP SS; invalid in 64-bit mode, deferred to the C implementation. */
FNIEMOP_DEF(iemOp_pop_SS)
{
    IEMOP_MNEMONIC("pop ss"); /** @todo implies instruction fusing? */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_SS, pIemCpu->enmEffOpSize);
}
7052
7053
/** Opcode 0x18 - SBB Eb,Gb: byte subtract-with-borrow, r/m destination. */
FNIEMOP_DEF(iemOp_sbb_Eb_Gb)
{
    IEMOP_MNEMONIC("sbb Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sbb);
}
7060
7061
/** Opcode 0x19 - SBB Ev,Gv: word/dword/qword subtract-with-borrow, r/m destination. */
FNIEMOP_DEF(iemOp_sbb_Ev_Gv)
{
    IEMOP_MNEMONIC("sbb Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sbb);
}
7068
7069
/** Opcode 0x1a - SBB Gb,Eb: byte subtract-with-borrow, register destination. */
FNIEMOP_DEF(iemOp_sbb_Gb_Eb)
{
    IEMOP_MNEMONIC("sbb Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sbb);
}
7076
7077
/** Opcode 0x1b - SBB Gv,Ev: word/dword/qword subtract-with-borrow, register destination. */
FNIEMOP_DEF(iemOp_sbb_Gv_Ev)
{
    IEMOP_MNEMONIC("sbb Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sbb);
}
7084
7085
/** Opcode 0x1c - SBB AL,Ib: subtract immediate byte and borrow from AL. */
FNIEMOP_DEF(iemOp_sbb_Al_Ib)
{
    IEMOP_MNEMONIC("sbb al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sbb);
}
7092
7093
/** Opcode 0x1d - SBB rAX,Iz: subtract immediate and borrow from rAX. */
FNIEMOP_DEF(iemOp_sbb_eAX_Iz)
{
    IEMOP_MNEMONIC("sbb rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sbb);
}
7100
7101
/** Opcode 0x1e - PUSH DS segment register. */
FNIEMOP_DEF(iemOp_push_DS)
{
    IEMOP_MNEMONIC("push ds");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_DS);
}
7108
7109
/** Opcode 0x1f - POP DS; invalid in 64-bit mode, deferred to the C implementation. */
FNIEMOP_DEF(iemOp_pop_DS)
{
    IEMOP_MNEMONIC("pop ds");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_DS, pIemCpu->enmEffOpSize);
}
7118
7119
/** Opcode 0x20 - AND Eb,Gb: byte bitwise AND, r/m destination (AF undefined). */
FNIEMOP_DEF(iemOp_and_Eb_Gb)
{
    IEMOP_MNEMONIC("and Eb,Gb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_and);
}
7127
7128
/** Opcode 0x21 - AND Ev,Gv: word/dword/qword bitwise AND, r/m destination (AF undefined). */
FNIEMOP_DEF(iemOp_and_Ev_Gv)
{
    IEMOP_MNEMONIC("and Ev,Gv");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_and);
}
7136
7137
/** Opcode 0x22 - AND Gb,Eb: byte bitwise AND, register destination (AF undefined). */
FNIEMOP_DEF(iemOp_and_Gb_Eb)
{
    IEMOP_MNEMONIC("and Gb,Eb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_and);
}
7145
7146
/** Opcode 0x23 - AND Gv,Ev: word/dword/qword bitwise AND, register destination (AF undefined). */
FNIEMOP_DEF(iemOp_and_Gv_Ev)
{
    IEMOP_MNEMONIC("and Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_and);
}
7154
7155
/** Opcode 0x24 - AND AL,Ib: AND immediate byte into AL (AF undefined). */
FNIEMOP_DEF(iemOp_and_Al_Ib)
{
    IEMOP_MNEMONIC("and al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_and);
}
7163
7164
/** Opcode 0x25 - AND rAX,Iz: AND immediate into rAX (AF undefined). */
FNIEMOP_DEF(iemOp_and_eAX_Iz)
{
    IEMOP_MNEMONIC("and rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_and);
}
7172
7173
/** Opcode 0x26 - ES segment-override prefix. */
FNIEMOP_DEF(iemOp_seg_ES)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg es");
    /* Record the prefix, make ES the effective segment, then decode the following opcode byte. */
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_ES;
    pIemCpu->iEffSeg = X86_SREG_ES;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
7184
7185
/** Opcode 0x27 - DAA (decimal adjust AL after addition); not yet implemented. */
FNIEMOP_STUB(iemOp_daa);
7188
7189
/** Opcode 0x28 - SUB Eb,Gb: byte subtract, r/m destination. */
FNIEMOP_DEF(iemOp_sub_Eb_Gb)
{
    IEMOP_MNEMONIC("sub Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sub);
}
7196
7197
/** Opcode 0x29 - SUB Ev,Gv: word/dword/qword subtract, r/m destination. */
FNIEMOP_DEF(iemOp_sub_Ev_Gv)
{
    IEMOP_MNEMONIC("sub Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sub);
}
7204
7205
/** Opcode 0x2a - SUB Gb,Eb: byte subtract, register destination. */
FNIEMOP_DEF(iemOp_sub_Gb_Eb)
{
    IEMOP_MNEMONIC("sub Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sub);
}
7212
7213
/** Opcode 0x2b - SUB Gv,Ev: word/dword/qword subtract, register destination. */
FNIEMOP_DEF(iemOp_sub_Gv_Ev)
{
    IEMOP_MNEMONIC("sub Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sub);
}
7220
7221
/** Opcode 0x2c - SUB AL,Ib: subtract immediate byte from AL. */
FNIEMOP_DEF(iemOp_sub_Al_Ib)
{
    IEMOP_MNEMONIC("sub al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sub);
}
7228
7229
/** Opcode 0x2d - SUB rAX,Iz: subtract immediate from rAX. */
FNIEMOP_DEF(iemOp_sub_eAX_Iz)
{
    IEMOP_MNEMONIC("sub rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sub);
}
7236
7237
/** Opcode 0x2e - CS segment-override prefix (also branch-hint byte, ignored here). */
FNIEMOP_DEF(iemOp_seg_CS)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg cs");
    /* Record the prefix, make CS the effective segment, then decode the following opcode byte. */
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_CS;
    pIemCpu->iEffSeg = X86_SREG_CS;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
7248
7249
/** Opcode 0x2f - DAS (decimal adjust AL after subtraction); not yet implemented. */
FNIEMOP_STUB(iemOp_das);
7252
7253
/** Opcode 0x30 - XOR Eb,Gb: byte bitwise XOR, r/m destination (AF undefined). */
FNIEMOP_DEF(iemOp_xor_Eb_Gb)
{
    IEMOP_MNEMONIC("xor Eb,Gb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_xor);
}
7261
7262
/** Opcode 0x31 - XOR Ev,Gv: word/dword/qword bitwise XOR, r/m destination (AF undefined). */
FNIEMOP_DEF(iemOp_xor_Ev_Gv)
{
    IEMOP_MNEMONIC("xor Ev,Gv");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_xor);
}
7270
7271
/** Opcode 0x32 - XOR Gb,Eb: byte bitwise XOR, register destination (AF undefined). */
FNIEMOP_DEF(iemOp_xor_Gb_Eb)
{
    IEMOP_MNEMONIC("xor Gb,Eb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_xor);
}
7279
7280
/** Opcode 0x33 - XOR Gv,Ev: word/dword/qword bitwise XOR, register destination (AF undefined). */
FNIEMOP_DEF(iemOp_xor_Gv_Ev)
{
    IEMOP_MNEMONIC("xor Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_xor);
}
7288
7289
/** Opcode 0x34 - XOR AL,Ib: XOR immediate byte into AL (AF undefined). */
FNIEMOP_DEF(iemOp_xor_Al_Ib)
{
    IEMOP_MNEMONIC("xor al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_xor);
}
7297
7298
/** Opcode 0x35 - XOR rAX,Iz: XOR immediate into rAX (AF undefined). */
FNIEMOP_DEF(iemOp_xor_eAX_Iz)
{
    IEMOP_MNEMONIC("xor rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_xor);
}
7306
7307
/** Opcode 0x36 - SS segment-override prefix. */
FNIEMOP_DEF(iemOp_seg_SS)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg ss");
    /* Record the prefix, make SS the effective segment, then decode the following opcode byte. */
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_SS;
    pIemCpu->iEffSeg = X86_SREG_SS;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
7318
7319
/** Opcode 0x37 - AAA (ASCII adjust AL after addition); not yet implemented. */
FNIEMOP_STUB(iemOp_aaa);
7322
7323
/** Opcode 0x38 - CMP Eb,Gb: byte compare (subtract, flags only). */
FNIEMOP_DEF(iemOp_cmp_Eb_Gb)
{
    IEMOP_MNEMONIC("cmp Eb,Gb");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_cmp);
}
7331
7332
/** Opcode 0x39 - CMP Ev,Gv: word/dword/qword compare (subtract, flags only). */
FNIEMOP_DEF(iemOp_cmp_Ev_Gv)
{
    IEMOP_MNEMONIC("cmp Ev,Gv");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_cmp);
}
7340
7341
/** Opcode 0x3a - CMP Gb,Eb: byte compare, register first operand.
 * NOTE(review): unlike 0x38/0x39 this variant has no IEMOP_HLP_NO_LOCK_PREFIX()
 * check - confirm whether that is intentional (register-destination forms
 * cannot take LOCK anyway) or an omission. */
FNIEMOP_DEF(iemOp_cmp_Gb_Eb)
{
    IEMOP_MNEMONIC("cmp Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_cmp);
}
7348
7349
/** Opcode 0x3b - CMP Gv,Ev: word/dword/qword compare, register first operand. */
FNIEMOP_DEF(iemOp_cmp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmp Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_cmp);
}
7356
7357
/** Opcode 0x3c - CMP AL,Ib: compare AL with immediate byte. */
FNIEMOP_DEF(iemOp_cmp_Al_Ib)
{
    IEMOP_MNEMONIC("cmp al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_cmp);
}
7364
7365
/** Opcode 0x3d - CMP rAX,Iz: compare rAX with immediate. */
FNIEMOP_DEF(iemOp_cmp_eAX_Iz)
{
    IEMOP_MNEMONIC("cmp rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_cmp);
}
7372
7373
/** Opcode 0x3e - DS segment-override prefix. */
FNIEMOP_DEF(iemOp_seg_DS)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg ds");
    /* Record the prefix, make DS the effective segment, then decode the following opcode byte. */
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_DS;
    pIemCpu->iEffSeg = X86_SREG_DS;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
7384
7385
/** Opcode 0x3f - AAS (ASCII adjust AL after subtraction); not yet implemented. */
FNIEMOP_STUB(iemOp_aas);
7388
/**
 * Common 'inc/dec/not/neg register' helper.
 *
 * Records the micro-ops for a unary read-modify-write on general register
 * @a iReg at the current effective operand size, calling the size-matched
 * worker from @a pImpl with a register reference and the EFLAGS reference.
 *
 * @param   pImpl   The unary operator implementation table (assembly workers).
 * @param   iReg    The general register index (X86_GREG_XXX).
 */
FNIEMOP_DEF_2(iemOpCommonUnaryGReg, PCIEMOPUNARYSIZES, pImpl, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_ARG(uint16_t *,  pu16Dst, 0);
            IEM_MC_ARG(uint32_t *,  pEFlags, 1);
            IEM_MC_REF_GREG_U16(pu16Dst, iReg);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_ARG(uint32_t *,  pu32Dst, 0);
            IEM_MC_ARG(uint32_t *,  pEFlags, 1);
            IEM_MC_REF_GREG_U32(pu32Dst, iReg);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
            /* 32-bit writes zero the upper half of the 64-bit register. */
            IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_ARG(uint64_t *,  pu64Dst, 0);
            IEM_MC_ARG(uint32_t *,  pEFlags, 1);
            IEM_MC_REF_GREG_U64(pu64Dst, iReg);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
    }
    /* Not reached - all IEMMODE values are handled above; keeps compilers quiet. */
    return VINF_SUCCESS;
}
7433
7434
/** Opcode 0x40 - INC eAX; REX prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_inc_eAX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX;

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eAX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xAX);
}
7453
7454
/** Opcode 0x41 - INC eCX; REX.B prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_inc_eCX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.b");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B;
        pIemCpu->uRexB     = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eCX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xCX);
}
7474
7475
/** Opcode 0x42 - INC eDX; REX.X prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_inc_eDX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.x");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X;
        pIemCpu->uRexIndex = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eDX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDX);
}
7495
7496
7497
/** Opcode 0x43 - INC eBX; REX.BX prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_inc_eBX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bx");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
        pIemCpu->uRexB     = 1 << 3;
        pIemCpu->uRexIndex = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eBX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBX);
}
7518
7519
/** Opcode 0x44 - INC eSP; REX.R prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_inc_eSP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.r");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R;
        pIemCpu->uRexReg   = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eSP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSP);
}
7539
7540
/** Opcode 0x45 - INC eBP; REX.RB prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_inc_eBP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rb");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B;
        pIemCpu->uRexReg   = 1 << 3;
        pIemCpu->uRexB     = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eBP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBP);
}
7561
7562
/** Opcode 0x46 - INC eSI; REX.RX prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_inc_eSI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rx");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X;
        pIemCpu->uRexReg   = 1 << 3;
        pIemCpu->uRexIndex = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eSI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSI);
}
7583
7584
/** Opcode 0x47 - INC eDI; REX.RBX prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_inc_eDI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbx");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
        pIemCpu->uRexReg   = 1 << 3;
        pIemCpu->uRexB     = 1 << 3;
        pIemCpu->uRexIndex = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eDI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDI);
}
7606
7607
/** Opcode 0x48 - DEC eAX; REX.W prefix in 64-bit mode (recalculates effective operand size). */
FNIEMOP_DEF(iemOp_dec_eAX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.w");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_SIZE_REX_W;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eAX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xAX);
}
7627
7628
/** Opcode 0x49 - DEC eCX; REX.BW prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eCX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexB     = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eCX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xCX);
}
7649
7650
/** Opcode 0x4a - DEC eDX; REX.XW prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eDX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.xw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexIndex = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eDX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDX);
}
7671
7672
/** Opcode 0x4b - DEC eBX; REX.BXW prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eBX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bxw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexB     = 1 << 3;
        pIemCpu->uRexIndex = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eBX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBX);
}
7694
7695
/** Opcode 0x4c - DEC eSP; REX.RW prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eSP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg   = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eSP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSP);
}
7716
7717
/** Opcode 0x4d - DEC eBP; REX.RBW prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eBP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg   = 1 << 3;
        pIemCpu->uRexB     = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eBP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBP);
}
7739
7740
/** Opcode 0x4e - DEC eSI; REX.RXW prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eSI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rxw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg   = 1 << 3;
        pIemCpu->uRexIndex = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eSI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSI);
}
7762
7763
/** Opcode 0x4f - DEC eDI; REX.RBXW prefix in 64-bit mode (all four REX bits set). */
FNIEMOP_DEF(iemOp_dec_eDI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbxw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg   = 1 << 3;
        pIemCpu->uRexB     = 1 << 3;
        pIemCpu->uRexIndex = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eDI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDI);
}
7786
7787
/**
 * Common 'push register' helper.
 *
 * Pushes general register @a iReg at the current effective operand size.  In
 * 64-bit mode the register index is extended with REX.B, the default operand
 * size is forced to 64-bit, and the effective size is 64-bit unless an
 * operand-size prefix selects 16-bit (there is no 32-bit push in long mode).
 *
 * @param   iReg    The general register index (X86_GREG_XXX).
 */
FNIEMOP_DEF_1(iemOpCommonPushGReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        iReg |= pIemCpu->uRexB;
        pIemCpu->enmDefOpSize = IEMMODE_64BIT;
        pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
    }

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint16_t, u16Value);
            IEM_MC_FETCH_GREG_U16(u16Value, iReg);
            IEM_MC_PUSH_U16(u16Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_GREG_U32(u32Value, iReg);
            IEM_MC_PUSH_U32(u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_GREG_U64(u64Value, iReg);
            IEM_MC_PUSH_U64(u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
    }

    return VINF_SUCCESS;
}
7833
7834
/** Opcode 0x50 - PUSH rAX (rR8 with REX.B). */
FNIEMOP_DEF(iemOp_push_eAX)
{
    IEMOP_MNEMONIC("push rAX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xAX);
}
7841
7842
/** Opcode 0x51 - PUSH rCX (rR9 with REX.B). */
FNIEMOP_DEF(iemOp_push_eCX)
{
    IEMOP_MNEMONIC("push rCX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xCX);
}
7849
7850
/** Opcode 0x52 - PUSH rDX (rR10 with REX.B). */
FNIEMOP_DEF(iemOp_push_eDX)
{
    IEMOP_MNEMONIC("push rDX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDX);
}
7857
7858
/** Opcode 0x53 - PUSH rBX (rR11 with REX.B). */
FNIEMOP_DEF(iemOp_push_eBX)
{
    IEMOP_MNEMONIC("push rBX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBX);
}
7865
7866
/** Opcode 0x54 - PUSH rSP (rR12 with REX.B). */
FNIEMOP_DEF(iemOp_push_eSP)
{
    IEMOP_MNEMONIC("push rSP");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSP);
}
7873
7874
/** Opcode 0x55 - PUSH rBP (rR13 with REX.B). */
FNIEMOP_DEF(iemOp_push_eBP)
{
    IEMOP_MNEMONIC("push rBP");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBP);
}
7881
7882
/** Opcode 0x56 - PUSH rSI (rR14 with REX.B). */
FNIEMOP_DEF(iemOp_push_eSI)
{
    IEMOP_MNEMONIC("push rSI");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSI);
}
7889
7890
/** Opcode 0x57 - PUSH rDI (rR15 with REX.B). */
FNIEMOP_DEF(iemOp_push_eDI)
{
    IEMOP_MNEMONIC("push rDI");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDI);
}
7897
7898
/**
 * Common 'pop register' helper.
 *
 * Pops into general register @a iReg at the current effective operand size.
 * In 64-bit mode the register index is extended with REX.B, the default
 * operand size is forced to 64-bit, and the effective size is 64-bit unless
 * an operand-size prefix selects 16-bit (no 32-bit pop in long mode).
 *
 * Note: the IEM_MC_LOCAL invocations below deliberately place the '*'
 * declarator in the name argument, so they declare pointer locals
 * (e.g. 'uint16_t *pu16Dst').
 *
 * @param   iReg    The general register index (X86_GREG_XXX).
 */
FNIEMOP_DEF_1(iemOpCommonPopGReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        iReg |= pIemCpu->uRexB;
        pIemCpu->enmDefOpSize = IEMMODE_64BIT;
        pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
    }

/** @todo How does this code handle iReg==X86_GREG_xSP. How does a real CPU
 *        handle it, for that matter (Intel pseudo code hints that the popped
 *        value is incremented by the stack item size.)  Test it, both encodings
 *        and all three register sizes. */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint16_t, *pu16Dst);
            IEM_MC_REF_GREG_U16(pu16Dst, iReg);
            IEM_MC_POP_U16(pu16Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, *pu32Dst);
            IEM_MC_REF_GREG_U32(pu32Dst, iReg);
            IEM_MC_POP_U32(pu32Dst);
            IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); /** @todo testcase*/
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, *pu64Dst);
            IEM_MC_REF_GREG_U64(pu64Dst, iReg);
            IEM_MC_POP_U64(pu64Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
    }

    return VINF_SUCCESS;
}
7949
7950
/** Opcode 0x58 - POP rAX (rR8 with REX.B). */
FNIEMOP_DEF(iemOp_pop_eAX)
{
    IEMOP_MNEMONIC("pop rAX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xAX);
}
7957
7958
/** Opcode 0x59 - POP rCX (rR9 with REX.B). */
FNIEMOP_DEF(iemOp_pop_eCX)
{
    IEMOP_MNEMONIC("pop rCX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xCX);
}
7965
7966
/** Opcode 0x5a - POP rDX (rR10 with REX.B). */
FNIEMOP_DEF(iemOp_pop_eDX)
{
    IEMOP_MNEMONIC("pop rDX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDX);
}
7973
7974
/** Opcode 0x5b - POP rBX (rR11 with REX.B). */
FNIEMOP_DEF(iemOp_pop_eBX)
{
    IEMOP_MNEMONIC("pop rBX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBX);
}
7981
7982
/** Opcode 0x5c - POP rSP (rR12 with REX.B); see xSP @todo in iemOpCommonPopGReg. */
FNIEMOP_DEF(iemOp_pop_eSP)
{
    IEMOP_MNEMONIC("pop rSP");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSP);
}
7989
7990
/** Opcode 0x5d - POP rBP (rR13 with REX.B). */
FNIEMOP_DEF(iemOp_pop_eBP)
{
    IEMOP_MNEMONIC("pop rBP");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBP);
}
7997
7998
/** Opcode 0x5e - POP rSI (rR14 with REX.B). */
FNIEMOP_DEF(iemOp_pop_eSI)
{
    IEMOP_MNEMONIC("pop rSI");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSI);
}
8005
8006
/** Opcode 0x5f - POP rDI (rR15 with REX.B). */
FNIEMOP_DEF(iemOp_pop_eDI)
{
    IEMOP_MNEMONIC("pop rDI");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDI);
}
8013
8014
/** Opcode 0x60 - PUSHA/PUSHAD: push all general registers; invalid in 64-bit mode. */
FNIEMOP_DEF(iemOp_pusha)
{
    IEMOP_MNEMONIC("pusha");
    IEMOP_HLP_NO_64BIT();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
        return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_16);
    Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_32);
}
8025
8026
/** Opcode 0x61 - POPA/POPAD: pop all general registers; invalid in 64-bit mode. */
FNIEMOP_DEF(iemOp_popa)
{
    IEMOP_MNEMONIC("popa");
    IEMOP_HLP_NO_64BIT();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
        return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_16);
    Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_32);
}
8037
8038
/** Opcode 0x62.
 * BOUND - not implemented yet; the stub macro raises/asserts when hit. */
FNIEMOP_STUB(iemOp_bound_Gv_Ma);
8041
8042
/** Opcode 0x63 - non-64-bit modes.
 * ARPL Ew,Gw - adjust the RPL field of the destination selector word.
 * Invalid in real and V8086 mode; in 64-bit mode opcode 0x63 is MOVSXD
 * and is handled by iemOp_movsxd_Gv_Ev instead. */
FNIEMOP_DEF(iemOp_arpl_Ew_Gw)
{
    IEMOP_MNEMONIC("arpl Ew,Gw");
    IEMOP_HLP_NO_REAL_OR_V86_MODE();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register */
        IEMOP_HLP_DECODED_NL_2(OP_ARPL, IEMOPFORM_MR_REG, OP_PARM_Ew, OP_PARM_Gw, DISOPTYPE_HARMLESS);
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint16_t *, pu16Dst, 0);
        IEM_MC_ARG(uint16_t, u16Src, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        /* Source is the reg field, destination the r/m field (MR form).
           No REX adjustments here since ARPL only exists outside 64-bit mode. */
        IEM_MC_FETCH_GREG_U16(u16Src, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
        IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK));
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_arpl, pu16Dst, u16Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* Memory */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint16_t *, pu16Dst, 0);
        IEM_MC_ARG(uint16_t, u16Src, 1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DECODED_NL_2(OP_ARPL, IEMOPFORM_MR_REG, OP_PARM_Ew, OP_PARM_Gw, DISOPTYPE_HARMLESS);
        /* Map the destination word read/write, run the worker, then commit. */
        IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U16(u16Src, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_arpl, pu16Dst, u16Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;

}
8091
8092
/** Opcode 0x63.
 * MOVSXD Gv,Ev - sign-extend a 32-bit source into a 64-bit register.
 * Only reached in 64-bit mode (the caller branches on mode for 0x63).
 * @note This is a weird one. It works like a regular move instruction if
 * REX.W isn't set, at least according to AMD docs (rev 3.15, 2009-11).
 * @todo This definitely needs a testcase to verify the odd cases. */
FNIEMOP_DEF(iemOp_movsxd_Gv_Ev)
{
    Assert(pIemCpu->enmEffOpSize == IEMMODE_64BIT); /* Caller branched already . */

    IEMOP_MNEMONIC("movsxd Gv,Ev");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /*
         * Register to register.
         */
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(uint64_t, u64Value);
        /* Read the 32-bit r/m source sign-extended to 64 bits, then store
           into the 64-bit reg-field destination (REX bits merged in). */
        IEM_MC_FETCH_GREG_U32_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint64_t, u64Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        /* 32-bit memory read, sign-extended to the 64-bit destination. */
        IEM_MC_FETCH_MEM_U32_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
8134
8135
/** Opcode 0x64.
 * FS segment-override prefix: record the prefix, switch the effective data
 * segment to FS, then continue decoding with the next opcode byte. */
FNIEMOP_DEF(iemOp_seg_FS)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg fs");
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_FS;
    pIemCpu->iEffSeg = X86_SREG_FS;

    /* Tail-dispatch the instruction that follows the prefix. */
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
8146
8147
/** Opcode 0x65.
 * GS segment-override prefix: record the prefix, switch the effective data
 * segment to GS, then continue decoding with the next opcode byte. */
FNIEMOP_DEF(iemOp_seg_GS)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg gs");
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_GS;
    pIemCpu->iEffSeg = X86_SREG_GS;

    /* Tail-dispatch the instruction that follows the prefix. */
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
8158
8159
/** Opcode 0x66.
 * Operand-size override prefix: record it, recalculate the effective
 * operand size, then continue decoding with the next opcode byte. */
FNIEMOP_DEF(iemOp_op_size)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("op size");
    pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_OP;
    iemRecalEffOpSize(pIemCpu);

    /* Tail-dispatch the instruction that follows the prefix. */
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
8170
8171
/** Opcode 0x67.
 * Address-size override prefix: record it and flip the effective address
 * mode relative to the default (16<->32, 64->32), then continue decoding
 * with the next opcode byte. */
FNIEMOP_DEF(iemOp_addr_size)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("addr size");
    pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_ADDR;
    switch (pIemCpu->enmDefAddrMode)
    {
        case IEMMODE_16BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
        case IEMMODE_32BIT: pIemCpu->enmEffAddrMode = IEMMODE_16BIT; break;
        case IEMMODE_64BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
        default: AssertFailed();
    }

    /* Tail-dispatch the instruction that follows the prefix. */
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
8188
8189
/** Opcode 0x68.
 * PUSH Iz - push an immediate of operand size. In 64-bit mode the default
 * operand size is forced to 64-bit and the immediate is a sign-extended
 * 32-bit value. */
FNIEMOP_DEF(iemOp_push_Iz)
{
    IEMOP_MNEMONIC("push Iz");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U16(u16Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U32(u32Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            /* 32-bit immediate, sign-extended to 64 bits before the push. */
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U64(u64Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
8233
8234
/** Opcode 0x69.
 * IMUL Gv,Ev,Iz - three-operand signed multiply with a full-size immediate.
 * The destination (reg field) receives Ev * Iz; SF/ZF/AF/PF are declared
 * undefined for verification purposes. The result is computed in a local
 * and stored back so the destination register is only written at the end. */
FNIEMOP_DEF(iemOp_imul_Gv_Ev_Iz)
{
    IEMOP_MNEMONIC("imul Gv,Ev,Iz"); /* Gv = Ev * Iz; */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16Tmp);

                IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* 2 = immediate bytes still to come after the ModRM bytes. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_ASSIGN(u16Src, u16Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32Tmp);

                IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* 4 = immediate bytes still to come after the ModRM bytes. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_ASSIGN(u32Src, u32Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                /* The 64-bit form takes a sign-extended 32-bit immediate. */
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm,1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);

                IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* 4 = immediate bytes still to come after the ModRM bytes. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_ASSIGN(u64Src, u64Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
        }
    }
    AssertFailedReturn(VERR_INTERNAL_ERROR_3);
}
8393
8394
/** Opcode 0x6a.
 * PUSH Ib - push a sign-extended byte immediate at the effective operand
 * size (i8Imm is implicitly sign-extended by the push macros). */
FNIEMOP_DEF(iemOp_push_Ib)
{
    IEMOP_MNEMONIC("push Ib");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0,0);
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_PUSH_U16(i8Imm);
            break;
        case IEMMODE_32BIT:
            IEM_MC_PUSH_U32(i8Imm);
            break;
        case IEMMODE_64BIT:
            IEM_MC_PUSH_U64(i8Imm);
            break;
    }
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8420
8421
/** Opcode 0x6b.
 * IMUL Gv,Ev,Ib - three-operand signed multiply with a sign-extended byte
 * immediate. Mirrors iemOp_imul_Gv_Ev_Iz except for the immediate size;
 * SF/ZF/AF/PF are declared undefined for verification purposes. */
FNIEMOP_DEF(iemOp_imul_Gv_Ev_Ib)
{
    IEMOP_MNEMONIC("imul Gv,Ev,Ib"); /* Gv = Ev * Iz; */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                /* Sign-extend the byte immediate to the operand size. */
                IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16Tmp);

                IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* 1 = immediate byte still to come after the ModRM bytes. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16Imm);
                IEM_MC_ASSIGN(u16Src, u16Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                /* Sign-extend the byte immediate to the operand size. */
                IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32Tmp);

                IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* 1 = immediate byte still to come after the ModRM bytes. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_S8_SX_U32(&u32Imm);
                IEM_MC_ASSIGN(u32Src, u32Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                /* Sign-extend the byte immediate to the operand size. */
                IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);

                IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* 1 = immediate byte still to come after the ModRM bytes. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S8_SX_U64(&u64Imm);
                IEM_MC_ASSIGN(u64Src, u64Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
    }
    AssertFailedReturn(VERR_INTERNAL_ERROR_3);
}
8574
8575
/** Opcode 0x6c.
 * INS/INSB - input byte(s) from port DX to ES:[e/r]DI. Dispatches on the
 * REP/REPNE prefix and the effective address size to a C implementation;
 * the 'false' argument selects the non-IO-check variant (see CIMPL). */
FNIEMOP_DEF(iemOp_insb_Yb_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep ins Yb,DX");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op8_addr16, false);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op8_addr32, false);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op8_addr64, false);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("ins Yb,DX");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op8_addr16, false);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op8_addr32, false);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op8_addr64, false);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
8603
8604
/** Opcode 0x6d.
 * INS/INSW/INSD - input word/dword(s) from port DX to ES:[e/r]DI.
 * Dispatches on REP prefix, effective operand size (64-bit collapses to
 * the 32-bit op variant) and effective address size. All inner-switch
 * cases return, so the trailing break statements are never reached. */
FNIEMOP_DEF(iemOp_inswd_Yv_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
    {
        IEMOP_MNEMONIC("rep ins Yv,DX");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op16_addr16, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op16_addr32, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op16_addr64, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reached - all cases above return */
            case IEMMODE_64BIT: /* no 64-bit port I/O; use 32-bit op variant */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op32_addr16, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op32_addr32, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op32_addr64, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reached - all cases above return */
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("ins Yv,DX");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op16_addr16, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op16_addr32, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op16_addr64, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reached - all cases above return */
            case IEMMODE_64BIT: /* no 64-bit port I/O; use 32-bit op variant */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op32_addr16, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op32_addr32, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op32_addr64, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reached - all cases above return */
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
8664
8665
/** Opcode 0x6e.
 * OUTS/OUTSB - output byte(s) from DS:[e/r]SI (segment overridable via
 * iEffSeg) to port DX. Dispatches on REP prefix and effective address
 * size to a C implementation. */
FNIEMOP_DEF(iemOp_outsb_Yb_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep out DX,Yb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr16, pIemCpu->iEffSeg, false);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr32, pIemCpu->iEffSeg, false);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr64, pIemCpu->iEffSeg, false);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("out DX,Yb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr16, pIemCpu->iEffSeg, false);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr32, pIemCpu->iEffSeg, false);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr64, pIemCpu->iEffSeg, false);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
8693
8694
/** Opcode 0x6f.
 * OUTS/OUTSW/OUTSD - output word/dword(s) from the effective segment at
 * [e/r]SI to port DX. Dispatches on REP prefix, effective operand size
 * (64-bit collapses to the 32-bit op variant) and effective address size.
 * All inner-switch cases return, so the trailing breaks are never reached. */
FNIEMOP_DEF(iemOp_outswd_Yv_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
    {
        IEMOP_MNEMONIC("rep outs DX,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr16, pIemCpu->iEffSeg, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr32, pIemCpu->iEffSeg, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr64, pIemCpu->iEffSeg, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reached - all cases above return */
            case IEMMODE_64BIT: /* no 64-bit port I/O; use 32-bit op variant */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr16, pIemCpu->iEffSeg, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr32, pIemCpu->iEffSeg, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr64, pIemCpu->iEffSeg, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reached - all cases above return */
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("outs DX,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr16, pIemCpu->iEffSeg, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr32, pIemCpu->iEffSeg, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr64, pIemCpu->iEffSeg, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reached - all cases above return */
            case IEMMODE_64BIT: /* no 64-bit port I/O; use 32-bit op variant */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr16, pIemCpu->iEffSeg, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr32, pIemCpu->iEffSeg, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr64, pIemCpu->iEffSeg, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reached - all cases above return */
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
8754
8755
/** Opcode 0x70.
 * JO rel8 - jump short if overflow (OF=1). */
FNIEMOP_DEF(iemOp_jo_Jb)
{
    IEMOP_MNEMONIC("jo Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8773
8774
/** Opcode 0x71.
 * JNO rel8 - jump short if not overflow (OF=0); note the inverted
 * branch arms compared to JO. */
FNIEMOP_DEF(iemOp_jno_Jb)
{
    IEMOP_MNEMONIC("jno Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8792
/** Opcode 0x72.
 * JC/JB/JNAE rel8 - jump short if carry (CF=1). */
FNIEMOP_DEF(iemOp_jc_Jb)
{
    IEMOP_MNEMONIC("jc/jnae Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8810
8811
/** Opcode 0x73.
 * JNC/JNB/JAE rel8 - jump short if not carry (CF=0); inverted arms. */
FNIEMOP_DEF(iemOp_jnc_Jb)
{
    IEMOP_MNEMONIC("jnc/jnb Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8829
8830
/** Opcode 0x74.
 * JE/JZ rel8 - jump short if equal/zero (ZF=1). */
FNIEMOP_DEF(iemOp_je_Jb)
{
    IEMOP_MNEMONIC("je/jz Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8848
8849
/** Opcode 0x75.
 * JNE/JNZ rel8 - jump short if not equal/not zero (ZF=0); inverted arms. */
FNIEMOP_DEF(iemOp_jne_Jb)
{
    IEMOP_MNEMONIC("jne/jnz Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8867
8868
/** Opcode 0x76.
 * JBE/JNA rel8 - jump short if below or equal (CF=1 or ZF=1). */
FNIEMOP_DEF(iemOp_jbe_Jb)
{
    IEMOP_MNEMONIC("jbe/jna Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8886
8887
/** Opcode 0x77.
 * JNBE/JA rel8 - jump short if above (CF=0 and ZF=0); inverted arms. */
FNIEMOP_DEF(iemOp_jnbe_Jb)
{
    IEMOP_MNEMONIC("jnbe/ja Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8905
8906
/** Opcode 0x78.
 * JS rel8 - jump short if sign (SF=1). */
FNIEMOP_DEF(iemOp_js_Jb)
{
    IEMOP_MNEMONIC("js Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8924
8925
/** Opcode 0x79.
 * JNS rel8 - jump short if not sign (SF=0); inverted arms. */
FNIEMOP_DEF(iemOp_jns_Jb)
{
    IEMOP_MNEMONIC("jns Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8943
8944
/** Opcode 0x7a.
 * JP/JPE rel8 - jump short if parity even (PF=1). */
FNIEMOP_DEF(iemOp_jp_Jb)
{
    IEMOP_MNEMONIC("jp Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8962
8963
/** Opcode 0x7b.
 * JNP/JPO rel8 - jump short if parity odd (PF=0); inverted arms. */
FNIEMOP_DEF(iemOp_jnp_Jb)
{
    IEMOP_MNEMONIC("jnp Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8981
8982
/** Opcode 0x7c.
 * JL/JNGE rel8 - jump short if less (SF != OF). */
FNIEMOP_DEF(iemOp_jl_Jb)
{
    IEMOP_MNEMONIC("jl/jnge Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9000
9001
/** Opcode 0x7d.
 * JNL/JGE rel8 - jump short if not less (SF == OF); inverted arms. */
FNIEMOP_DEF(iemOp_jnl_Jb)
{
    IEMOP_MNEMONIC("jnl/jge Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9019
9020
/** Opcode 0x7e.
 * JLE/JNG rel8 - jump short if less or equal (ZF=1 or SF != OF). */
FNIEMOP_DEF(iemOp_jle_Jb)
{
    IEMOP_MNEMONIC("jle/jng Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9038
9039
/** Opcode 0x7f.
 * JNLE/JG rel8 - jump short if greater (ZF=0 and SF == OF); inverted arms. */
FNIEMOP_DEF(iemOp_jnle_Jb)
{
    IEMOP_MNEMONIC("jnle/jg Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9057
9058
9059/** Opcode 0x80. */
9060FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_80)
9061{
9062 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9063 IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Eb,Ib");
9064 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
9065
9066 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9067 {
9068 /* register target */
9069 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9070 IEMOP_HLP_NO_LOCK_PREFIX();
9071 IEM_MC_BEGIN(3, 0);
9072 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9073 IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);
9074 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9075
9076 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9077 IEM_MC_REF_EFLAGS(pEFlags);
9078 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
9079
9080 IEM_MC_ADVANCE_RIP();
9081 IEM_MC_END();
9082 }
9083 else
9084 {
9085 /* memory target */
9086 uint32_t fAccess;
9087 if (pImpl->pfnLockedU8)
9088 fAccess = IEM_ACCESS_DATA_RW;
9089 else
9090 { /* CMP */
9091 IEMOP_HLP_NO_LOCK_PREFIX();
9092 fAccess = IEM_ACCESS_DATA_R;
9093 }
9094 IEM_MC_BEGIN(3, 2);
9095 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9096 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
9097 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9098
9099 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
9100 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9101 IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);
9102
9103 IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9104 IEM_MC_FETCH_EFLAGS(EFlags);
9105 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
9106 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
9107 else
9108 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);
9109
9110 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
9111 IEM_MC_COMMIT_EFLAGS(EFlags);
9112 IEM_MC_ADVANCE_RIP();
9113 IEM_MC_END();
9114 }
9115 return VINF_SUCCESS;
9116}
9117
9118
9119/** Opcode 0x81. */
9120FNIEMOP_DEF(iemOp_Grp1_Ev_Iz)
9121{
    /*
     * Group 1, Ev,Iz: ADD/OR/ADC/SBB/AND/SUB/XOR/CMP r/m16/32/64, imm16/32.
     * The ModRM /reg field selects the operation.  Operand size decides the
     * immediate: imm16 (16-bit), imm32 (32-bit), or imm32 sign-extended to
     * 64 bits (64-bit).  For memory targets the immediate is fetched after
     * the effective address because displacement bytes precede it; the
     * immediate size is passed to IEM_MC_CALC_RM_EFF_ADDR for RIP-relative
     * addressing.  Only CMP has no locked worker (read-only destination).
     */
9122 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9123 IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Iz");
9124 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
9125
9126 switch (pIemCpu->enmEffOpSize)
9127 {
9128 case IEMMODE_16BIT:
9129 {
9130 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9131 {
9132 /* register target */
9133 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9134 IEMOP_HLP_NO_LOCK_PREFIX();
9135 IEM_MC_BEGIN(3, 0);
9136 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9137 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u16Imm, 1);
9138 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9139
9140 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9141 IEM_MC_REF_EFLAGS(pEFlags);
9142 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
9143
9144 IEM_MC_ADVANCE_RIP();
9145 IEM_MC_END();
9146 }
9147 else
9148 {
9149 /* memory target */
9150 uint32_t fAccess;
9151 if (pImpl->pfnLockedU16)
9152 fAccess = IEM_ACCESS_DATA_RW;
9153 else
9154 { /* CMP */
9155 IEMOP_HLP_NO_LOCK_PREFIX();
9156 fAccess = IEM_ACCESS_DATA_R;
9157 }
9158 IEM_MC_BEGIN(3, 2);
9159 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9160 IEM_MC_ARG(uint16_t, u16Src, 1);
9161 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
9162 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9163
9164 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2); /* cbImm = 2 */
9165 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9166 IEM_MC_ASSIGN(u16Src, u16Imm);
9167 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9168 IEM_MC_FETCH_EFLAGS(EFlags);
9169 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
9170 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
9171 else
9172 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
9173
9174 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
9175 IEM_MC_COMMIT_EFLAGS(EFlags);
9176 IEM_MC_ADVANCE_RIP();
9177 IEM_MC_END();
9178 }
9179 break;
9180 }
9181
9182 case IEMMODE_32BIT:
9183 {
9184 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9185 {
9186 /* register target */
9187 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
9188 IEMOP_HLP_NO_LOCK_PREFIX();
9189 IEM_MC_BEGIN(3, 0);
9190 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9191 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u32Imm, 1);
9192 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9193
9194 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9195 IEM_MC_REF_EFLAGS(pEFlags);
9196 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                /* 32-bit GPR writes zero the high dword in 64-bit mode. */
9197 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
9198
9199 IEM_MC_ADVANCE_RIP();
9200 IEM_MC_END();
9201 }
9202 else
9203 {
9204 /* memory target */
9205 uint32_t fAccess;
9206 if (pImpl->pfnLockedU32)
9207 fAccess = IEM_ACCESS_DATA_RW;
9208 else
9209 { /* CMP */
9210 IEMOP_HLP_NO_LOCK_PREFIX();
9211 fAccess = IEM_ACCESS_DATA_R;
9212 }
9213 IEM_MC_BEGIN(3, 2);
9214 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9215 IEM_MC_ARG(uint32_t, u32Src, 1);
9216 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
9217 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9218
9219 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4); /* cbImm = 4 */
9220 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
9221 IEM_MC_ASSIGN(u32Src, u32Imm);
9222 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9223 IEM_MC_FETCH_EFLAGS(EFlags);
9224 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
9225 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
9226 else
9227 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
9228
9229 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
9230 IEM_MC_COMMIT_EFLAGS(EFlags);
9231 IEM_MC_ADVANCE_RIP();
9232 IEM_MC_END();
9233 }
9234 break;
9235 }
9236
9237 case IEMMODE_64BIT:
9238 {
9239 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9240 {
9241 /* register target */
                /* No imm64 form exists; the imm32 is sign-extended to 64 bits. */
9242 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
9243 IEMOP_HLP_NO_LOCK_PREFIX();
9244 IEM_MC_BEGIN(3, 0);
9245 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9246 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u64Imm, 1);
9247 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9248
9249 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9250 IEM_MC_REF_EFLAGS(pEFlags);
9251 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
9252
9253 IEM_MC_ADVANCE_RIP();
9254 IEM_MC_END();
9255 }
9256 else
9257 {
9258 /* memory target */
9259 uint32_t fAccess;
9260 if (pImpl->pfnLockedU64)
9261 fAccess = IEM_ACCESS_DATA_RW;
9262 else
9263 { /* CMP */
9264 IEMOP_HLP_NO_LOCK_PREFIX();
9265 fAccess = IEM_ACCESS_DATA_R;
9266 }
9267 IEM_MC_BEGIN(3, 2);
9268 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9269 IEM_MC_ARG(uint64_t, u64Src, 1);
9270 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
9271 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9272
9273 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4); /* cbImm = 4 (imm32, sign-extended) */
9274 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
9275 IEM_MC_ASSIGN(u64Src, u64Imm);
9276 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9277 IEM_MC_FETCH_EFLAGS(EFlags);
9278 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
9279 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
9280 else
9281 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
9282
9283 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
9284 IEM_MC_COMMIT_EFLAGS(EFlags);
9285 IEM_MC_ADVANCE_RIP();
9286 IEM_MC_END();
9287 }
9288 break;
9289 }
9290 }
9291 return VINF_SUCCESS;
9292}
9293
9294
9295/** Opcode 0x82. */
9296FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_82)
9297{
    /* 0x82 is an undocumented alias of 0x80 (group 1 Eb,Ib) that is only
       valid outside 64-bit mode; in long mode it raises #UD. */
9298 IEMOP_HLP_NO_64BIT(); /** @todo do we need to decode the whole instruction or is this ok? */
9299 return FNIEMOP_CALL(iemOp_Grp1_Eb_Ib_80);
9300}
9301
9302
9303/** Opcode 0x83. */
9304FNIEMOP_DEF(iemOp_Grp1_Ev_Ib)
9305{
    /*
     * Group 1, Ev,Ib: ADD/OR/ADC/SBB/AND/SUB/XOR/CMP r/m16/32/64, imm8.
     * The ModRM /reg field selects the operation; the imm8 is sign-extended
     * to the effective operand size.  For memory targets the immediate is
     * fetched after the effective address (displacement precedes it in the
     * encoding, cbImm = 1).  Only CMP lacks a locked worker and therefore
     * maps the destination read-only and rejects the LOCK prefix.
     */
9306 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9307 IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Ib");
9308 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
9309
9310 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9311 {
9312 /*
9313 * Register target
9314 */
9315 IEMOP_HLP_NO_LOCK_PREFIX();
9316 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9317 switch (pIemCpu->enmEffOpSize)
9318 {
9319 case IEMMODE_16BIT:
9320 {
9321 IEM_MC_BEGIN(3, 0);
9322 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9323 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ (int8_t)u8Imm,1); /* sign-extend imm8 */
9324 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9325
9326 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9327 IEM_MC_REF_EFLAGS(pEFlags);
9328 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
9329
9330 IEM_MC_ADVANCE_RIP();
9331 IEM_MC_END();
9332 break;
9333 }
9334
9335 case IEMMODE_32BIT:
9336 {
9337 IEM_MC_BEGIN(3, 0);
9338 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9339 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ (int8_t)u8Imm,1); /* sign-extend imm8 */
9340 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9341
9342 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9343 IEM_MC_REF_EFLAGS(pEFlags);
9344 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                /* 32-bit GPR writes zero the high dword in 64-bit mode. */
9345 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
9346
9347 IEM_MC_ADVANCE_RIP();
9348 IEM_MC_END();
9349 break;
9350 }
9351
9352 case IEMMODE_64BIT:
9353 {
9354 IEM_MC_BEGIN(3, 0);
9355 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9356 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ (int8_t)u8Imm,1); /* sign-extend imm8 */
9357 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9358
9359 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9360 IEM_MC_REF_EFLAGS(pEFlags);
9361 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
9362
9363 IEM_MC_ADVANCE_RIP();
9364 IEM_MC_END();
9365 break;
9366 }
9367 }
9368 }
9369 else
9370 {
9371 /*
9372 * Memory target.
9373 */
9374 uint32_t fAccess;
9375 if (pImpl->pfnLockedU16)
9376 fAccess = IEM_ACCESS_DATA_RW;
9377 else
9378 { /* CMP */
9379 IEMOP_HLP_NO_LOCK_PREFIX();
9380 fAccess = IEM_ACCESS_DATA_R;
9381 }
9382
9383 switch (pIemCpu->enmEffOpSize)
9384 {
9385 case IEMMODE_16BIT:
9386 {
9387 IEM_MC_BEGIN(3, 2);
9388 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9389 IEM_MC_ARG(uint16_t, u16Src, 1);
9390 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
9391 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9392
9393 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); /* cbImm = 1 */
9394 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9395 IEM_MC_ASSIGN(u16Src, (int8_t)u8Imm); /* sign-extend imm8 */
9396 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9397 IEM_MC_FETCH_EFLAGS(EFlags);
9398 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
9399 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
9400 else
9401 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
9402
9403 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
9404 IEM_MC_COMMIT_EFLAGS(EFlags);
9405 IEM_MC_ADVANCE_RIP();
9406 IEM_MC_END();
9407 break;
9408 }
9409
9410 case IEMMODE_32BIT:
9411 {
9412 IEM_MC_BEGIN(3, 2);
9413 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9414 IEM_MC_ARG(uint32_t, u32Src, 1);
9415 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
9416 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9417
9418 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); /* cbImm = 1 */
9419 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9420 IEM_MC_ASSIGN(u32Src, (int8_t)u8Imm); /* sign-extend imm8 */
9421 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9422 IEM_MC_FETCH_EFLAGS(EFlags);
9423 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
9424 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
9425 else
9426 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
9427
9428 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
9429 IEM_MC_COMMIT_EFLAGS(EFlags);
9430 IEM_MC_ADVANCE_RIP();
9431 IEM_MC_END();
9432 break;
9433 }
9434
9435 case IEMMODE_64BIT:
9436 {
9437 IEM_MC_BEGIN(3, 2);
9438 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9439 IEM_MC_ARG(uint64_t, u64Src, 1);
9440 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
9441 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9442
9443 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); /* cbImm = 1 */
9444 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9445 IEM_MC_ASSIGN(u64Src, (int8_t)u8Imm); /* sign-extend imm8 */
9446 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9447 IEM_MC_FETCH_EFLAGS(EFlags);
9448 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
9449 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
9450 else
9451 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
9452
9453 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
9454 IEM_MC_COMMIT_EFLAGS(EFlags);
9455 IEM_MC_ADVANCE_RIP();
9456 IEM_MC_END();
9457 break;
9458 }
9459 }
9460 }
9461 return VINF_SUCCESS;
9462}
9463
9464
9465/** Opcode 0x84. */
9466FNIEMOP_DEF(iemOp_test_Eb_Gb)
9467{
    /* TEST r/m8, r8 - AND without writing the result; shares the generic
       byte r/m,reg binary-op decoder with the TEST worker table. */
9468 IEMOP_MNEMONIC("test Eb,Gb");
9469 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
9470 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF); /* AF is undefined after TEST */
9471 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_test);
9472}
9473
9474
9475/** Opcode 0x85. */
9476FNIEMOP_DEF(iemOp_test_Ev_Gv)
9477{
    /* TEST r/m16/32/64, r16/32/64 - AND without writing the result; shares
       the generic word/dword/qword r/m,reg decoder with the TEST workers. */
9478 IEMOP_MNEMONIC("test Ev,Gv");
9479 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
9480 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF); /* AF is undefined after TEST */
9481 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_test);
9482}
9483
9484
9485/** Opcode 0x86. */
9486FNIEMOP_DEF(iemOp_xchg_Eb_Gb)
9487{
    /* XCHG r/m8, r8.  The register form is a plain two-way swap; the memory
       form goes through the xchg worker on a read-write mapping (memory XCHG
       has implicit bus-lock semantics on real hardware). */
9488 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9489 IEMOP_MNEMONIC("xchg Eb,Gb");
9490
9491 /*
9492 * If rm is denoting a register, no more instruction bytes.
9493 */
9494 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9495 {
9496 IEMOP_HLP_NO_LOCK_PREFIX();
9497
9498 IEM_MC_BEGIN(0, 2);
9499 IEM_MC_LOCAL(uint8_t, uTmp1);
9500 IEM_MC_LOCAL(uint8_t, uTmp2);
9501
9502 IEM_MC_FETCH_GREG_U8(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
9503 IEM_MC_FETCH_GREG_U8(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9504 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
9505 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
9506
9507 IEM_MC_ADVANCE_RIP();
9508 IEM_MC_END();
9509 }
9510 else
9511 {
9512 /*
9513 * We're accessing memory.
9514 */
9515/** @todo the register must be committed separately! */
9516 IEM_MC_BEGIN(2, 2);
9517 IEM_MC_ARG(uint8_t *, pu8Mem, 0);
9518 IEM_MC_ARG(uint8_t *, pu8Reg, 1);
9519 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9520
9521 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
9522 IEM_MC_MEM_MAP(pu8Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9523 IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
9524 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u8, pu8Mem, pu8Reg);
9525 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Mem, IEM_ACCESS_DATA_RW);
9526
9527 IEM_MC_ADVANCE_RIP();
9528 IEM_MC_END();
9529 }
9530 return VINF_SUCCESS;
9531}
9532
9533
9534/** Opcode 0x87. */
9535FNIEMOP_DEF(iemOp_xchg_Ev_Gv)
9536{
    /* XCHG r/m16/32/64, r16/32/64.  Register form is a plain swap per
       operand size; memory form uses the xchg worker on a read-write
       mapping (memory XCHG has implicit bus-lock semantics on hardware). */
9537 IEMOP_MNEMONIC("xchg Ev,Gv");
9538 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9539
9540 /*
9541 * If rm is denoting a register, no more instruction bytes.
9542 */
9543 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9544 {
9545 IEMOP_HLP_NO_LOCK_PREFIX();
9546
9547 switch (pIemCpu->enmEffOpSize)
9548 {
9549 case IEMMODE_16BIT:
9550 IEM_MC_BEGIN(0, 2);
9551 IEM_MC_LOCAL(uint16_t, uTmp1);
9552 IEM_MC_LOCAL(uint16_t, uTmp2);
9553
9554 IEM_MC_FETCH_GREG_U16(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
9555 IEM_MC_FETCH_GREG_U16(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9556 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
9557 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
9558
9559 IEM_MC_ADVANCE_RIP();
9560 IEM_MC_END();
9561 return VINF_SUCCESS;
9562
9563 case IEMMODE_32BIT:
                /* The U32 stores zero the high dwords implicitly. */
9564 IEM_MC_BEGIN(0, 2);
9565 IEM_MC_LOCAL(uint32_t, uTmp1);
9566 IEM_MC_LOCAL(uint32_t, uTmp2);
9567
9568 IEM_MC_FETCH_GREG_U32(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
9569 IEM_MC_FETCH_GREG_U32(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9570 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
9571 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
9572
9573 IEM_MC_ADVANCE_RIP();
9574 IEM_MC_END();
9575 return VINF_SUCCESS;
9576
9577 case IEMMODE_64BIT:
9578 IEM_MC_BEGIN(0, 2);
9579 IEM_MC_LOCAL(uint64_t, uTmp1);
9580 IEM_MC_LOCAL(uint64_t, uTmp2);
9581
9582 IEM_MC_FETCH_GREG_U64(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
9583 IEM_MC_FETCH_GREG_U64(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9584 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
9585 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
9586
9587 IEM_MC_ADVANCE_RIP();
9588 IEM_MC_END();
9589 return VINF_SUCCESS;
9590
9591 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9592 }
9593 }
9594 else
9595 {
9596 /*
9597 * We're accessing memory.
9598 */
9599 switch (pIemCpu->enmEffOpSize)
9600 {
9601/** @todo the register must be committed separately! */
9602 case IEMMODE_16BIT:
9603 IEM_MC_BEGIN(2, 2);
9604 IEM_MC_ARG(uint16_t *, pu16Mem, 0);
9605 IEM_MC_ARG(uint16_t *, pu16Reg, 1);
9606 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9607
9608 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
9609 IEM_MC_MEM_MAP(pu16Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9610 IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
9611 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u16, pu16Mem, pu16Reg);
9612 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Mem, IEM_ACCESS_DATA_RW);
9613
9614 IEM_MC_ADVANCE_RIP();
9615 IEM_MC_END();
9616 return VINF_SUCCESS;
9617
9618 case IEMMODE_32BIT:
9619 IEM_MC_BEGIN(2, 2);
9620 IEM_MC_ARG(uint32_t *, pu32Mem, 0);
9621 IEM_MC_ARG(uint32_t *, pu32Reg, 1);
9622 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9623
9624 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
9625 IEM_MC_MEM_MAP(pu32Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9626 IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
9627 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u32, pu32Mem, pu32Reg);
9628 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Mem, IEM_ACCESS_DATA_RW);
9629
                /* The worker writes through the reference, so the high dword
                   of the destination GPR must be cleared explicitly here. */
9630 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Reg);
9631 IEM_MC_ADVANCE_RIP();
9632 IEM_MC_END();
9633 return VINF_SUCCESS;
9634
9635 case IEMMODE_64BIT:
9636 IEM_MC_BEGIN(2, 2);
9637 IEM_MC_ARG(uint64_t *, pu64Mem, 0);
9638 IEM_MC_ARG(uint64_t *, pu64Reg, 1);
9639 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9640
9641 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
9642 IEM_MC_MEM_MAP(pu64Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9643 IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
9644 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u64, pu64Mem, pu64Reg);
9645 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Mem, IEM_ACCESS_DATA_RW);
9646
9647 IEM_MC_ADVANCE_RIP();
9648 IEM_MC_END();
9649 return VINF_SUCCESS;
9650
9651 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9652 }
9653 }
9654}
9655
9656
9657/** Opcode 0x88. */
9658FNIEMOP_DEF(iemOp_mov_Eb_Gb)
9659{
    /* MOV r/m8, r8 - copy a byte register to a register or memory. */
9660 IEMOP_MNEMONIC("mov Eb,Gb");
9661
9662 uint8_t bRm;
9663 IEM_OPCODE_GET_NEXT_U8(&bRm);
9664 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9665
9666 /*
9667 * If rm is denoting a register, no more instruction bytes.
9668 */
9669 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9670 {
9671 IEM_MC_BEGIN(0, 1);
9672 IEM_MC_LOCAL(uint8_t, u8Value);
9673 IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
9674 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Value);
9675 IEM_MC_ADVANCE_RIP();
9676 IEM_MC_END();
9677 }
9678 else
9679 {
9680 /*
9681 * We're writing a register to memory.
9682 */
9683 IEM_MC_BEGIN(0, 2);
9684 IEM_MC_LOCAL(uint8_t, u8Value);
9685 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9686 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
9687 IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
9688 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Value);
9689 IEM_MC_ADVANCE_RIP();
9690 IEM_MC_END();
9691 }
9692 return VINF_SUCCESS;
9693
9694}
9695
9696
9697/** Opcode 0x89. */
9698FNIEMOP_DEF(iemOp_mov_Ev_Gv)
9699{
    /* MOV r/m16/32/64, r16/32/64 - store a general register to a register
       or memory destination, switched on the effective operand size. */
9700 IEMOP_MNEMONIC("mov Ev,Gv");
9701
9702 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9703 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9704
9705 /*
9706 * If rm is denoting a register, no more instruction bytes.
9707 */
9708 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9709 {
9710 switch (pIemCpu->enmEffOpSize)
9711 {
9712 case IEMMODE_16BIT:
9713 IEM_MC_BEGIN(0, 1);
9714 IEM_MC_LOCAL(uint16_t, u16Value);
9715 IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
9716 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
9717 IEM_MC_ADVANCE_RIP();
9718 IEM_MC_END();
9719 break;
9720
9721 case IEMMODE_32BIT:
9722 IEM_MC_BEGIN(0, 1);
9723 IEM_MC_LOCAL(uint32_t, u32Value);
9724 IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
9725 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
9726 IEM_MC_ADVANCE_RIP();
9727 IEM_MC_END();
9728 break;
9729
9730 case IEMMODE_64BIT:
9731 IEM_MC_BEGIN(0, 1);
9732 IEM_MC_LOCAL(uint64_t, u64Value);
9733 IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
9734 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
9735 IEM_MC_ADVANCE_RIP();
9736 IEM_MC_END();
9737 break;
9738 }
9739 }
9740 else
9741 {
9742 /*
9743 * We're writing a register to memory.
9744 */
9745 switch (pIemCpu->enmEffOpSize)
9746 {
9747 case IEMMODE_16BIT:
9748 IEM_MC_BEGIN(0, 2);
9749 IEM_MC_LOCAL(uint16_t, u16Value);
9750 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9751 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
9752 IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
9753 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
9754 IEM_MC_ADVANCE_RIP();
9755 IEM_MC_END();
9756 break;
9757
9758 case IEMMODE_32BIT:
9759 IEM_MC_BEGIN(0, 2);
9760 IEM_MC_LOCAL(uint32_t, u32Value);
9761 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9762 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
9763 IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
9764 IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Value);
9765 IEM_MC_ADVANCE_RIP();
9766 IEM_MC_END();
9767 break;
9768
9769 case IEMMODE_64BIT:
9770 IEM_MC_BEGIN(0, 2);
9771 IEM_MC_LOCAL(uint64_t, u64Value);
9772 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9773 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
9774 IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
9775 IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Value);
9776 IEM_MC_ADVANCE_RIP();
9777 IEM_MC_END();
9778 break;
9779 }
9780 }
9781 return VINF_SUCCESS;
9782}
9783
9784
9785/** Opcode 0x8a. */
9786FNIEMOP_DEF(iemOp_mov_Gb_Eb)
9787{
    /* MOV r8, r/m8 - load a byte register from a register or memory. */
9788 IEMOP_MNEMONIC("mov Gb,Eb");
9789
9790 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9791 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9792
9793 /*
9794 * If rm is denoting a register, no more instruction bytes.
9795 */
9796 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9797 {
9798 IEM_MC_BEGIN(0, 1);
9799 IEM_MC_LOCAL(uint8_t, u8Value);
9800 IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9801 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
9802 IEM_MC_ADVANCE_RIP();
9803 IEM_MC_END();
9804 }
9805 else
9806 {
9807 /*
9808 * We're loading a register from memory.
9809 */
9810 IEM_MC_BEGIN(0, 2);
9811 IEM_MC_LOCAL(uint8_t, u8Value);
9812 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9813 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
9814 IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
9815 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
9816 IEM_MC_ADVANCE_RIP();
9817 IEM_MC_END();
9818 }
9819 return VINF_SUCCESS;
9820}
9821
9822
9823/** Opcode 0x8b. */
9824FNIEMOP_DEF(iemOp_mov_Gv_Ev)
9825{
    /* MOV r16/32/64, r/m16/32/64 - load a general register from a register
       or memory source, switched on the effective operand size. */
9826 IEMOP_MNEMONIC("mov Gv,Ev");
9827
9828 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9829 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9830
9831 /*
9832 * If rm is denoting a register, no more instruction bytes.
9833 */
9834 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9835 {
9836 switch (pIemCpu->enmEffOpSize)
9837 {
9838 case IEMMODE_16BIT:
9839 IEM_MC_BEGIN(0, 1);
9840 IEM_MC_LOCAL(uint16_t, u16Value);
9841 IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9842 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
9843 IEM_MC_ADVANCE_RIP();
9844 IEM_MC_END();
9845 break;
9846
9847 case IEMMODE_32BIT:
9848 IEM_MC_BEGIN(0, 1);
9849 IEM_MC_LOCAL(uint32_t, u32Value);
9850 IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9851 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
9852 IEM_MC_ADVANCE_RIP();
9853 IEM_MC_END();
9854 break;
9855
9856 case IEMMODE_64BIT:
9857 IEM_MC_BEGIN(0, 1);
9858 IEM_MC_LOCAL(uint64_t, u64Value);
9859 IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9860 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
9861 IEM_MC_ADVANCE_RIP();
9862 IEM_MC_END();
9863 break;
9864 }
9865 }
9866 else
9867 {
9868 /*
9869 * We're loading a register from memory.
9870 */
9871 switch (pIemCpu->enmEffOpSize)
9872 {
9873 case IEMMODE_16BIT:
9874 IEM_MC_BEGIN(0, 2);
9875 IEM_MC_LOCAL(uint16_t, u16Value);
9876 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9877 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
9878 IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
9879 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
9880 IEM_MC_ADVANCE_RIP();
9881 IEM_MC_END();
9882 break;
9883
9884 case IEMMODE_32BIT:
9885 IEM_MC_BEGIN(0, 2);
9886 IEM_MC_LOCAL(uint32_t, u32Value);
9887 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9888 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
9889 IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
9890 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
9891 IEM_MC_ADVANCE_RIP();
9892 IEM_MC_END();
9893 break;
9894
9895 case IEMMODE_64BIT:
9896 IEM_MC_BEGIN(0, 2);
9897 IEM_MC_LOCAL(uint64_t, u64Value);
9898 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9899 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
9900 IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
9901 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
9902 IEM_MC_ADVANCE_RIP();
9903 IEM_MC_END();
9904 break;
9905 }
9906 }
9907 return VINF_SUCCESS;
9908}
9909
9910
9911/** Opcode 0x63. */
9912FNIEMOP_DEF(iemOp_arpl_Ew_Gw_movsx_Gv_Ev)
9913{
9914 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9915 return FNIEMOP_CALL(iemOp_arpl_Ew_Gw);
9916 if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
9917 return FNIEMOP_CALL(iemOp_mov_Gv_Ev);
9918 return FNIEMOP_CALL(iemOp_movsxd_Gv_Ev);
9919}
9920
9921
9922/** Opcode 0x8c. */
9923FNIEMOP_DEF(iemOp_mov_Ev_Sw)
9924{
    /* MOV r/m16/32/64, Sreg - store a segment selector.  Register form
       honours the operand size (upper bits zeroed); memory form always
       stores 16 bits regardless of operand-size prefixes. */
9925 IEMOP_MNEMONIC("mov Ev,Sw");
9926
9927 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9928 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9929
9930 /*
9931 * Check that the destination register exists. The REX.R prefix is ignored.
9932 */
    /* /reg values 6 and 7 do not name a segment register -> #UD. */
9933 uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
9934 if ( iSegReg > X86_SREG_GS)
9935 return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9936
9937 /*
9938 * If rm is denoting a register, no more instruction bytes.
9939 * In that case, the operand size is respected and the upper bits are
9940 * cleared (starting with some pentium).
9941 */
9942 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9943 {
9944 switch (pIemCpu->enmEffOpSize)
9945 {
9946 case IEMMODE_16BIT:
9947 IEM_MC_BEGIN(0, 1);
9948 IEM_MC_LOCAL(uint16_t, u16Value);
9949 IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
9950 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
9951 IEM_MC_ADVANCE_RIP();
9952 IEM_MC_END();
9953 break;
9954
9955 case IEMMODE_32BIT:
9956 IEM_MC_BEGIN(0, 1);
9957 IEM_MC_LOCAL(uint32_t, u32Value);
9958 IEM_MC_FETCH_SREG_ZX_U32(u32Value, iSegReg); /* zero-extended selector */
9959 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
9960 IEM_MC_ADVANCE_RIP();
9961 IEM_MC_END();
9962 break;
9963
9964 case IEMMODE_64BIT:
9965 IEM_MC_BEGIN(0, 1);
9966 IEM_MC_LOCAL(uint64_t, u64Value);
9967 IEM_MC_FETCH_SREG_ZX_U64(u64Value, iSegReg); /* zero-extended selector */
9968 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
9969 IEM_MC_ADVANCE_RIP();
9970 IEM_MC_END();
9971 break;
9972 }
9973 }
9974 else
9975 {
9976 /*
9977 * We're saving the register to memory. The access is word sized
9978 * regardless of operand size prefixes.
9979 */
9980#if 0 /* not necessary */
9981 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
9982#endif
9983 IEM_MC_BEGIN(0, 2);
9984 IEM_MC_LOCAL(uint16_t, u16Value);
9985 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9986 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
9987 IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
9988 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
9989 IEM_MC_ADVANCE_RIP();
9990 IEM_MC_END();
9991 }
9992 return VINF_SUCCESS;
9993}
9994
9995
9996
9997
9998/** Opcode 0x8d. */
9999FNIEMOP_DEF(iemOp_lea_Gv_M)
10000{
    /* LEA r16/32/64, m - store the effective address of the memory operand
       in the destination register without touching memory.  A register-form
       ModRM (mod == 3) is invalid and raises #UD.  For 16/32-bit operand
       sizes the computed address is truncated to the destination width. */
10001 IEMOP_MNEMONIC("lea Gv,M");
10002 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10003 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10004 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10005 return IEMOP_RAISE_INVALID_OPCODE(); /* no register form */
10006
10007 switch (pIemCpu->enmEffOpSize)
10008 {
10009 case IEMMODE_16BIT:
10010 IEM_MC_BEGIN(0, 2);
10011 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
10012 IEM_MC_LOCAL(uint16_t, u16Cast);
10013 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
10014 IEM_MC_ASSIGN_TO_SMALLER(u16Cast, GCPtrEffSrc); /* truncate to 16 bits */
10015 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Cast);
10016 IEM_MC_ADVANCE_RIP();
10017 IEM_MC_END();
10018 return VINF_SUCCESS;
10019
10020 case IEMMODE_32BIT:
10021 IEM_MC_BEGIN(0, 2);
10022 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
10023 IEM_MC_LOCAL(uint32_t, u32Cast);
10024 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
10025 IEM_MC_ASSIGN_TO_SMALLER(u32Cast, GCPtrEffSrc); /* truncate to 32 bits */
10026 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Cast);
10027 IEM_MC_ADVANCE_RIP();
10028 IEM_MC_END();
10029 return VINF_SUCCESS;
10030
10031 case IEMMODE_64BIT:
10032 IEM_MC_BEGIN(0, 1);
10033 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
10034 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
10035 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, GCPtrEffSrc);
10036 IEM_MC_ADVANCE_RIP();
10037 IEM_MC_END();
10038 return VINF_SUCCESS;
10039 }
10040 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
10041}
10042
10043
/** Opcode 0x8e. */
FNIEMOP_DEF(iemOp_mov_Sw_Ev)
{
    IEMOP_MNEMONIC("mov Sw,Ev");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * The practical operand size is 16-bit.
     */
#if 0 /* not necessary */
    pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
#endif

    /*
     * Check that the destination register exists and can be used with this
     * instruction.  The REX.R prefix is ignored.  Loading CS via MOV is
     * architecturally invalid (#UD), as are reg values beyond GS.
     */
    uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
    if (   iSegReg == X86_SREG_CS
        || iSegReg > X86_SREG_GS)
        return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register source: the segment load itself is done by the C
           implementation since it involves descriptor checks and may fault. */
        IEM_MC_BEGIN(2, 0);
        IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
        IEM_MC_ARG(uint16_t, u16Value, 1);
        IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
        IEM_MC_END();
    }
    else
    {
        /*
         * We're loading the register from memory.  The access is word sized
         * regardless of operand size prefixes.
         */
        IEM_MC_BEGIN(2, 1);
        IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
        IEM_MC_ARG(uint16_t, u16Value, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
10097
10098
/** Opcode 0x8f /0. */
FNIEMOP_DEF_1(iemOp_pop_Ev, uint8_t, bRm)
{
    /* This bugger is rather annoying as it requires rSP to be updated before
       doing the effective address calculations.  Will eventually require a
       split between the R/M+SIB decoding and the effective address
       calculation - which is something that is required for any attempt at
       reusing this code for a recompiler.  It may also be good to have if we
       need to delay #UD exception caused by invalid lock prefixes.

       For now, we'll do a mostly safe interpreter-only implementation here. */
    /** @todo What's the deal with the 'reg' field and pop Ev?  Ignoring it for
     *        now until tests show it's checked.. */
    IEMOP_MNEMONIC("pop Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /* Register access is relatively easy and can share code. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return FNIEMOP_CALL_1(iemOpCommonPopGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);

    /*
     * Memory target.
     *
     * Intel says that RSP is incremented before it's used in any effective
     * address calculations.  This means some serious extra annoyance here since
     * we decode and calculate the effective address in one step and like to
     * delay committing registers till everything is done.
     *
     * So, we'll decode and calculate the effective address twice.  This will
     * require some recoding if turned into a recompiler.
     */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* The common code does this differently. */

#ifndef TST_IEM_CHECK_MC
    /* Calc effective address with modified ESP. */
    /* First pass: advances the opcode stream past any disp/SIB bytes; the
       offset is restored so the second pass re-reads the same bytes. */
    uint8_t const offOpcodeSaved = pIemCpu->offOpcode;
    RTGCPTR GCPtrEff;
    VBOXSTRICTRC rcStrict;
    rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, 0, &GCPtrEff);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pIemCpu->offOpcode = offOpcodeSaved;

    /* Second pass: temporarily bump rSP by the pop size so rSP-relative
       addressing sees the post-increment value, then restore rSP. */
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
    uint64_t const RspSaved = pCtx->rsp;
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT: iemRegAddToRsp(pIemCpu, pCtx, 2); break;
        case IEMMODE_32BIT: iemRegAddToRsp(pIemCpu, pCtx, 4); break;
        case IEMMODE_64BIT: iemRegAddToRsp(pIemCpu, pCtx, 8); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, 0, &GCPtrEff);
    Assert(rcStrict == VINF_SUCCESS); /* first pass already validated the decode */
    pCtx->rsp = RspSaved;

    /* Perform the operation - this should be CImpl. */
    /* Pop into a temporary and store to memory; rSP is only committed on
       full success so a faulting store leaves the guest state unchanged. */
    RTUINT64U TmpRsp;
    TmpRsp.u = pCtx->rsp;
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Value;
            rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
            if (rcStrict == VINF_SUCCESS)
                rcStrict = iemMemStoreDataU16(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u16Value);
            break;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Value;
            rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
            if (rcStrict == VINF_SUCCESS)
                rcStrict = iemMemStoreDataU32(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u32Value);
            break;
        }

        case IEMMODE_64BIT:
        {
            uint64_t u64Value;
            rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
            if (rcStrict == VINF_SUCCESS)
                rcStrict = iemMemStoreDataU64(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u64Value);
            break;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    if (rcStrict == VINF_SUCCESS)
    {
        pCtx->rsp = TmpRsp.u;
        iemRegUpdateRip(pIemCpu);
    }
    return rcStrict;

#else
    return VERR_IEM_IPE_2;
#endif
}
10200
10201
10202/** Opcode 0x8f. */
10203FNIEMOP_DEF(iemOp_Grp1A)
10204{
10205 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10206 if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only pop Ev in this group. */
10207 return IEMOP_RAISE_INVALID_OPCODE();
10208 return FNIEMOP_CALL_1(iemOp_pop_Ev, bRm);
10209}
10210
10211
/**
 * Common 'xchg reg,rAX' helper.
 *
 * Swaps the general register selected by @a iReg (extended with REX.B) with
 * rAX at the current effective operand size.  Used by opcodes 0x90..0x97.
 *
 * @param   iReg    The low 3 bits of the register index (REX.B is OR'ed in).
 */
FNIEMOP_DEF_1(iemOpCommonXchgGRegRax, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    iReg |= pIemCpu->uRexB;
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint16_t, u16Tmp1);
            IEM_MC_LOCAL(uint16_t, u16Tmp2);
            IEM_MC_FETCH_GREG_U16(u16Tmp1, iReg);
            IEM_MC_FETCH_GREG_U16(u16Tmp2, X86_GREG_xAX);
            IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp1);
            IEM_MC_STORE_GREG_U16(iReg, u16Tmp2);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Tmp1);
            IEM_MC_LOCAL(uint32_t, u32Tmp2);
            IEM_MC_FETCH_GREG_U32(u32Tmp1, iReg);
            IEM_MC_FETCH_GREG_U32(u32Tmp2, X86_GREG_xAX);
            IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp1);
            IEM_MC_STORE_GREG_U32(iReg, u32Tmp2);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Tmp1);
            IEM_MC_LOCAL(uint64_t, u64Tmp2);
            IEM_MC_FETCH_GREG_U64(u64Tmp1, iReg);
            IEM_MC_FETCH_GREG_U64(u64Tmp2, X86_GREG_xAX);
            IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp1);
            IEM_MC_STORE_GREG_U64(iReg, u64Tmp2);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10261
10262
/** Opcode 0x90. */
FNIEMOP_DEF(iemOp_nop)
{
    /* R8/R8D and RAX/EAX can be exchanged.  With REX.B, 0x90 is a real
       'xchg r8,rAX' rather than NOP. */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_B)
    {
        IEMOP_MNEMONIC("xchg r8,rAX");
        return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xAX);
    }

    /* F3 0x90 is PAUSE; both are emulated as a plain no-op here. */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
        IEMOP_MNEMONIC("pause");
    else
        IEMOP_MNEMONIC("nop");
    IEM_MC_BEGIN(0, 0);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10282
10283
/** Opcode 0x91. Exchanges rCX with rAX via the common helper. */
FNIEMOP_DEF(iemOp_xchg_eCX_eAX)
{
    IEMOP_MNEMONIC("xchg rCX,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xCX);
}
10290
10291
/** Opcode 0x92. Exchanges rDX with rAX via the common helper. */
FNIEMOP_DEF(iemOp_xchg_eDX_eAX)
{
    IEMOP_MNEMONIC("xchg rDX,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDX);
}
10298
10299
/** Opcode 0x93. Exchanges rBX with rAX via the common helper. */
FNIEMOP_DEF(iemOp_xchg_eBX_eAX)
{
    IEMOP_MNEMONIC("xchg rBX,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBX);
}
10306
10307
10308/** Opcode 0x94. */
10309FNIEMOP_DEF(iemOp_xchg_eSP_eAX)
10310{
10311 IEMOP_MNEMONIC("xchg rSX,rAX");
10312 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSP);
10313}
10314
10315
/** Opcode 0x95. Exchanges rBP with rAX via the common helper. */
FNIEMOP_DEF(iemOp_xchg_eBP_eAX)
{
    IEMOP_MNEMONIC("xchg rBP,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBP);
}
10322
10323
/** Opcode 0x96. Exchanges rSI with rAX via the common helper. */
FNIEMOP_DEF(iemOp_xchg_eSI_eAX)
{
    IEMOP_MNEMONIC("xchg rSI,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSI);
}
10330
10331
/** Opcode 0x97. Exchanges rDI with rAX via the common helper. */
FNIEMOP_DEF(iemOp_xchg_eDI_eAX)
{
    IEMOP_MNEMONIC("xchg rDI,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDI);
}
10338
10339
/** Opcode 0x98.
 * CBW/CWDE/CDQE - sign-extend AL->AX, AX->EAX, or EAX->RAX depending on the
 * effective operand size.  Implemented by testing the source's sign bit and
 * either OR-ing in or AND-ing out the upper half. */
FNIEMOP_DEF(iemOp_cbw)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEMOP_MNEMONIC("cbw");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 7) {          /* sign bit of AL */
                IEM_MC_OR_GREG_U16(X86_GREG_xAX, UINT16_C(0xff00));
            } IEM_MC_ELSE() {
                IEM_MC_AND_GREG_U16(X86_GREG_xAX, UINT16_C(0x00ff));
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEMOP_MNEMONIC("cwde");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 15) {         /* sign bit of AX */
                IEM_MC_OR_GREG_U32(X86_GREG_xAX, UINT32_C(0xffff0000));
            } IEM_MC_ELSE() {
                IEM_MC_AND_GREG_U32(X86_GREG_xAX, UINT32_C(0x0000ffff));
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEMOP_MNEMONIC("cdqe");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 31) {         /* sign bit of EAX */
                IEM_MC_OR_GREG_U64(X86_GREG_xAX, UINT64_C(0xffffffff00000000));
            } IEM_MC_ELSE() {
                IEM_MC_AND_GREG_U64(X86_GREG_xAX, UINT64_C(0x00000000ffffffff));
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10385
10386
/** Opcode 0x99.
 * CWD/CDQ/CQO - sign-extend rAX into rDX: DX/EDX/RDX is set to all ones or
 * all zeros depending on the sign bit of AX/EAX/RAX. */
FNIEMOP_DEF(iemOp_cwd)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEMOP_MNEMONIC("cwd");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 15) {         /* sign bit of AX */
                IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xDX, UINT16_C(0xffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEMOP_MNEMONIC("cdq");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 31) {         /* sign bit of EAX */
                IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xDX, UINT32_C(0xffffffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEMOP_MNEMONIC("cqo");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 63) {         /* sign bit of RAX */
                IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xDX, UINT64_C(0xffffffffffffffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10432
10433
/** Opcode 0x9a.
 * Far call with an absolute sel:offset immediate.  Invalid in 64-bit mode. */
FNIEMOP_DEF(iemOp_call_Ap)
{
    IEMOP_MNEMONIC("call Ap");
    IEMOP_HLP_NO_64BIT();

    /* Decode the far pointer address and pass it on to the far call C implementation. */
    /* Offset width follows the effective operand size (16 or 32 bits); the
       16-bit selector always follows the offset. */
    uint32_t offSeg;
    if (pIemCpu->enmEffOpSize != IEMMODE_16BIT)
        IEM_OPCODE_GET_NEXT_U32(&offSeg);
    else
        IEM_OPCODE_GET_NEXT_U16_ZX_U32(&offSeg);
    uint16_t uSel;  IEM_OPCODE_GET_NEXT_U16(&uSel);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_callf, uSel, offSeg, pIemCpu->enmEffOpSize);
}
10450
10451
/** Opcode 0x9b. (aka fwait)
 * Checks for pending x87 exceptions (and CR0.TS/EM style #NM conditions)
 * without executing any FPU operation. */
FNIEMOP_DEF(iemOp_wait)
{
    IEMOP_MNEMONIC("wait");
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10465
10466
/** Opcode 0x9c.
 * PUSHF - deferred to the C implementation since the pushed flag image and
 * permission checks depend on CPU mode, IOPL and VM flag. */
FNIEMOP_DEF(iemOp_pushf_Fv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_pushf, pIemCpu->enmEffOpSize);
}
10474
10475
/** Opcode 0x9d.
 * POPF - deferred to the C implementation since which flag bits may actually
 * be modified depends on CPU mode, IOPL and VM flag. */
FNIEMOP_DEF(iemOp_popf_Fv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_popf, pIemCpu->enmEffOpSize);
}
10483
10484
/** Opcode 0x9e.
 * SAHF - stores AH into the low byte of EFLAGS (SF,ZF,AF,PF,CF).
 * In 64-bit mode this requires the LAHF/SAHF CPUID feature bit. */
FNIEMOP_DEF(iemOp_sahf)
{
    IEMOP_MNEMONIC("sahf");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
        && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
        return IEMOP_RAISE_INVALID_OPCODE();
    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(uint32_t, u32Flags);
    IEM_MC_LOCAL(uint32_t, EFlags);
    IEM_MC_FETCH_EFLAGS(EFlags);
    IEM_MC_FETCH_GREG_U8_ZX_U32(u32Flags, X86_GREG_xSP/*=AH*/);
    /* Keep only the status flags from AH, clear the low byte of EFLAGS,
       force the always-one reserved bit 1, then merge. */
    IEM_MC_AND_LOCAL_U32(u32Flags, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
    IEM_MC_AND_LOCAL_U32(EFlags, UINT32_C(0xffffff00));
    IEM_MC_OR_LOCAL_U32(u32Flags, X86_EFL_1);
    IEM_MC_OR_2LOCS_U32(EFlags, u32Flags);
    IEM_MC_COMMIT_EFLAGS(EFlags);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10507
10508
/** Opcode 0x9f.
 * LAHF - loads the low byte of EFLAGS into AH.
 * In 64-bit mode this requires the LAHF/SAHF CPUID feature bit. */
FNIEMOP_DEF(iemOp_lahf)
{
    IEMOP_MNEMONIC("lahf");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
        && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
        return IEMOP_RAISE_INVALID_OPCODE();
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint8_t, u8Flags);
    IEM_MC_FETCH_EFLAGS_U8(u8Flags);
    IEM_MC_STORE_GREG_U8(X86_GREG_xSP/*=AH*/, u8Flags);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10525
10526
/**
 * Macro used by iemOp_mov_Al_Ob, iemOp_mov_rAX_Ov, iemOp_mov_Ob_AL and
 * iemOp_mov_Ov_rAX to fetch the moffsXX bit of the opcode and fend off lock
 * prefixes.  Will return on failures.
 *
 * The moffs immediate is an absolute address whose width follows the
 * effective address-size mode (16/32/64-bit) and is zero-extended to 64 bits.
 *
 * @param a_GCPtrMemOff The variable to store the offset in.
 */
#define IEMOP_FETCH_MOFFS_XX(a_GCPtrMemOff) \
    do \
    { \
        switch (pIemCpu->enmEffAddrMode) \
        { \
            case IEMMODE_16BIT: \
                IEM_OPCODE_GET_NEXT_U16_ZX_U64(&(a_GCPtrMemOff)); \
                break; \
            case IEMMODE_32BIT: \
                IEM_OPCODE_GET_NEXT_U32_ZX_U64(&(a_GCPtrMemOff)); \
                break; \
            case IEMMODE_64BIT: \
                IEM_OPCODE_GET_NEXT_U64(&(a_GCPtrMemOff)); \
                break; \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
        IEMOP_HLP_NO_LOCK_PREFIX(); \
    } while (0)
10551
10552/** Opcode 0xa0. */
10553FNIEMOP_DEF(iemOp_mov_Al_Ob)
10554{
10555 /*
10556 * Get the offset and fend of lock prefixes.
10557 */
10558 RTGCPTR GCPtrMemOff;
10559 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
10560
10561 /*
10562 * Fetch AL.
10563 */
10564 IEM_MC_BEGIN(0,1);
10565 IEM_MC_LOCAL(uint8_t, u8Tmp);
10566 IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
10567 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
10568 IEM_MC_ADVANCE_RIP();
10569 IEM_MC_END();
10570 return VINF_SUCCESS;
10571}
10572
10573
/** Opcode 0xa1.
 * MOV rAX,moffs - loads AX/EAX/RAX from an absolute (segment-relative)
 * address at the current effective operand size. */
FNIEMOP_DEF(iemOp_mov_rAX_Ov)
{
    /*
     * Get the offset and fend off lock prefixes.
     */
    IEMOP_MNEMONIC("mov rAX,Ov");
    RTGCPTR GCPtrMemOff;
    IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);

    /*
     * Fetch rAX.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint16_t, u16Tmp);
            IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint32_t, u32Tmp);
            IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint64_t, u64Tmp);
            IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10619
10620
10621/** Opcode 0xa2. */
10622FNIEMOP_DEF(iemOp_mov_Ob_AL)
10623{
10624 /*
10625 * Get the offset and fend of lock prefixes.
10626 */
10627 RTGCPTR GCPtrMemOff;
10628 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
10629
10630 /*
10631 * Store AL.
10632 */
10633 IEM_MC_BEGIN(0,1);
10634 IEM_MC_LOCAL(uint8_t, u8Tmp);
10635 IEM_MC_FETCH_GREG_U8(u8Tmp, X86_GREG_xAX);
10636 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrMemOff, u8Tmp);
10637 IEM_MC_ADVANCE_RIP();
10638 IEM_MC_END();
10639 return VINF_SUCCESS;
10640}
10641
10642
10643/** Opcode 0xa3. */
10644FNIEMOP_DEF(iemOp_mov_Ov_rAX)
10645{
10646 /*
10647 * Get the offset and fend of lock prefixes.
10648 */
10649 RTGCPTR GCPtrMemOff;
10650 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
10651
10652 /*
10653 * Store rAX.
10654 */
10655 switch (pIemCpu->enmEffOpSize)
10656 {
10657 case IEMMODE_16BIT:
10658 IEM_MC_BEGIN(0,1);
10659 IEM_MC_LOCAL(uint16_t, u16Tmp);
10660 IEM_MC_FETCH_GREG_U16(u16Tmp, X86_GREG_xAX);
10661 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrMemOff, u16Tmp);
10662 IEM_MC_ADVANCE_RIP();
10663 IEM_MC_END();
10664 return VINF_SUCCESS;
10665
10666 case IEMMODE_32BIT:
10667 IEM_MC_BEGIN(0,1);
10668 IEM_MC_LOCAL(uint32_t, u32Tmp);
10669 IEM_MC_FETCH_GREG_U32(u32Tmp, X86_GREG_xAX);
10670 IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrMemOff, u32Tmp);
10671 IEM_MC_ADVANCE_RIP();
10672 IEM_MC_END();
10673 return VINF_SUCCESS;
10674
10675 case IEMMODE_64BIT:
10676 IEM_MC_BEGIN(0,1);
10677 IEM_MC_LOCAL(uint64_t, u64Tmp);
10678 IEM_MC_FETCH_GREG_U64(u64Tmp, X86_GREG_xAX);
10679 IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrMemOff, u64Tmp);
10680 IEM_MC_ADVANCE_RIP();
10681 IEM_MC_END();
10682 return VINF_SUCCESS;
10683
10684 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10685 }
10686}
10687
/** Macro used by iemOp_movsb_Xb_Yb and iemOp_movswd_Xv_Yv.
 * Emits one MOVS step: load from DS(or segment override):rSI, store to
 * ES:rDI, then advance or retreat both index registers by the element size
 * according to EFLAGS.DF. */
#define IEM_MOVS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(0, 2); \
        IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
        IEM_MC_LOCAL(RTGCPTR,           uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
        IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END();
10706
/** Opcode 0xa4.
 * MOVSB - byte string move.  REP variants go to the C implementation; the
 * single-step form shares IEM_MOVS_CASE with iemOp_movswd_Xv_Yv. */
FNIEMOP_DEF(iemOp_movsb_Xb_Yb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep movsb Xb,Yb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("movsb Xb,Yb");

    /*
     * Sharing case implementation with movs[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_MOVS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_MOVS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_MOVS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
10740
10741
/** Opcode 0xa5.
 * MOVSW/MOVSD/MOVSQ - word/dword/qword string move.  REP variants go to the
 * C implementation; the single-step forms share IEM_MOVS_CASE with movsb. */
FNIEMOP_DEF(iemOp_movswd_Xv_Yv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep movs Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                /* no break needed: every path above returns */
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /* 64-bit op + 16-bit addr cannot be encoded */
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("movs Xv,Yv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with movsb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_MOVS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_MOVS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_MOVS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_MOVS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_MOVS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_MOVS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_MOVS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_MOVS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
10824
10825#undef IEM_MOVS_CASE
10826
/** Macro used by iemOp_cmpsb_Xb_Yb and iemOp_cmpswd_Xv_Yv.
 * Emits one CMPS step: load element 1 from DS(or override):rSI and element 2
 * from ES:rDI, compare them via the CMP assembly worker (updating EFLAGS
 * only), then advance or retreat rSI/rDI per EFLAGS.DF. */
#define IEM_CMPS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(3, 3); \
        IEM_MC_ARG(uint##ValBits##_t *, puValue1, 0); \
        IEM_MC_ARG(uint##ValBits##_t,   uValue2,  1); \
        IEM_MC_ARG(uint32_t *,          pEFlags,  2); \
        IEM_MC_LOCAL(uint##ValBits##_t, uValue1); \
        IEM_MC_LOCAL(RTGCPTR,           uAddr); \
        \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue1, pIemCpu->iEffSeg, uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue2, X86_SREG_ES, uAddr); \
        IEM_MC_REF_LOCAL(puValue1, uValue1); \
        IEM_MC_REF_EFLAGS(pEFlags); \
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puValue1, uValue2, pEFlags); \
        \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END(); \
10853
10854/** Opcode 0xa6. */
10855FNIEMOP_DEF(iemOp_cmpsb_Xb_Yb)
10856{
10857 IEMOP_HLP_NO_LOCK_PREFIX();
10858
10859 /*
10860 * Use the C implementation if a repeat prefix is encountered.
10861 */
10862 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
10863 {
10864 IEMOP_MNEMONIC("repe cmps Xb,Yb");
10865 switch (pIemCpu->enmEffAddrMode)
10866 {
10867 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr16, pIemCpu->iEffSeg);
10868 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr32, pIemCpu->iEffSeg);
10869 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr64, pIemCpu->iEffSeg);
10870 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10871 }
10872 }
10873 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
10874 {
10875 IEMOP_MNEMONIC("repe cmps Xb,Yb");
10876 switch (pIemCpu->enmEffAddrMode)
10877 {
10878 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr16, pIemCpu->iEffSeg);
10879 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr32, pIemCpu->iEffSeg);
10880 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr64, pIemCpu->iEffSeg);
10881 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10882 }
10883 }
10884 IEMOP_MNEMONIC("cmps Xb,Yb");
10885
10886 /*
10887 * Sharing case implementation with cmps[wdq] below.
10888 */
10889 switch (pIemCpu->enmEffAddrMode)
10890 {
10891 case IEMMODE_16BIT: IEM_CMPS_CASE(8, 16); break;
10892 case IEMMODE_32BIT: IEM_CMPS_CASE(8, 32); break;
10893 case IEMMODE_64BIT: IEM_CMPS_CASE(8, 64); break;
10894 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10895 }
10896 return VINF_SUCCESS;
10897
10898}
10899
10900
/** Opcode 0xa7.
 * CMPSW/CMPSD/CMPSQ - word/dword/qword string compare.  REPE/REPNE variants
 * go to the C implementation; the single-step forms share IEM_CMPS_CASE with
 * cmpsb. */
FNIEMOP_DEF(iemOp_cmpswd_Xv_Yv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
    {
        IEMOP_MNEMONIC("repe cmps Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                /* no break needed: every path above returns */
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /* 64-bit op + 16-bit addr cannot be encoded */
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }

    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
    {
        IEMOP_MNEMONIC("repne cmps Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                /* no break needed: every path above returns */
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /* 64-bit op + 16-bit addr cannot be encoded */
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }

    IEMOP_MNEMONIC("cmps Xv,Yv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with cmpsb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_CMPS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_CMPS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;

}
11019
11020#undef IEM_CMPS_CASE
11021
/** Opcode 0xa8. */
FNIEMOP_DEF(iemOp_test_AL_Ib)
{
    IEMOP_MNEMONIC("test al,Ib");
    /* TEST leaves AF undefined; tell the verifier not to compare it. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    /* Shares the generic AL,Ib binary-operator decoder with ADD/AND/OR/... */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_test);
}
11029
11030
/** Opcode 0xa9. */
FNIEMOP_DEF(iemOp_test_eAX_Iz)
{
    IEMOP_MNEMONIC("test rAX,Iz");
    /* TEST leaves AF undefined; tell the verifier not to compare it. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    /* Shares the generic rAX,Iz binary-operator decoder with ADD/AND/OR/... */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_test);
}
11038
11039
/** Macro used by iemOp_stosb_Yb_AL and iemOp_stoswd_Yv_eAX.
 *
 * Emits one non-repeated STOS step: store rAX (ValBits wide) at ES:rDI and
 * advance/retreat rDI by the element size according to EFLAGS.DF.
 *
 * Note: the final IEM_MC_END() line deliberately has no trailing backslash;
 * the old trailing '\' silently extended the macro onto the following (blank)
 * line, unlike the sibling IEM_LODS_CASE/IEM_SCAS_CASE macros. */
#define IEM_STOS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(0, 2); \
        IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
        IEM_MC_LOCAL(RTGCPTR,  uAddr); \
        IEM_MC_FETCH_GREG_U##ValBits(uValue, X86_GREG_xAX); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr,  X86_GREG_xDI); \
        IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END();
11055
/** Opcode 0xaa. */
FNIEMOP_DEF(iemOp_stosb_Yb_AL)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     * (REP and REPNE are treated the same for STOS.)
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep stos Yb,al");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("stos Yb,al");

    /*
     * Sharing case implementation with stos[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_STOS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_STOS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_STOS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
11089
11090
11091/** Opcode 0xab. */
11092FNIEMOP_DEF(iemOp_stoswd_Yv_eAX)
11093{
11094 IEMOP_HLP_NO_LOCK_PREFIX();
11095
11096 /*
11097 * Use the C implementation if a repeat prefix is encountered.
11098 */
11099 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
11100 {
11101 IEMOP_MNEMONIC("rep stos Yv,rAX");
11102 switch (pIemCpu->enmEffOpSize)
11103 {
11104 case IEMMODE_16BIT:
11105 switch (pIemCpu->enmEffAddrMode)
11106 {
11107 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m16);
11108 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m32);
11109 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m64);
11110 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11111 }
11112 break;
11113 case IEMMODE_32BIT:
11114 switch (pIemCpu->enmEffAddrMode)
11115 {
11116 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m16);
11117 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m32);
11118 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m64);
11119 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11120 }
11121 case IEMMODE_64BIT:
11122 switch (pIemCpu->enmEffAddrMode)
11123 {
11124 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
11125 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m32);
11126 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m64);
11127 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11128 }
11129 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11130 }
11131 }
11132 IEMOP_MNEMONIC("stos Yv,rAX");
11133
11134 /*
11135 * Annoying double switch here.
11136 * Using ugly macro for implementing the cases, sharing it with stosb.
11137 */
11138 switch (pIemCpu->enmEffOpSize)
11139 {
11140 case IEMMODE_16BIT:
11141 switch (pIemCpu->enmEffAddrMode)
11142 {
11143 case IEMMODE_16BIT: IEM_STOS_CASE(16, 16); break;
11144 case IEMMODE_32BIT: IEM_STOS_CASE(16, 32); break;
11145 case IEMMODE_64BIT: IEM_STOS_CASE(16, 64); break;
11146 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11147 }
11148 break;
11149
11150 case IEMMODE_32BIT:
11151 switch (pIemCpu->enmEffAddrMode)
11152 {
11153 case IEMMODE_16BIT: IEM_STOS_CASE(32, 16); break;
11154 case IEMMODE_32BIT: IEM_STOS_CASE(32, 32); break;
11155 case IEMMODE_64BIT: IEM_STOS_CASE(32, 64); break;
11156 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11157 }
11158 break;
11159
11160 case IEMMODE_64BIT:
11161 switch (pIemCpu->enmEffAddrMode)
11162 {
11163 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
11164 case IEMMODE_32BIT: IEM_STOS_CASE(64, 32); break;
11165 case IEMMODE_64BIT: IEM_STOS_CASE(64, 64); break;
11166 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11167 }
11168 break;
11169 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11170 }
11171 return VINF_SUCCESS;
11172}
11173
11174#undef IEM_STOS_CASE
11175
/** Macro used by iemOp_lodsb_AL_Xb and iemOp_lodswd_eAX_Xv.
 *
 * Emits one non-repeated LODS step: load rAX (ValBits wide) from DS:rSI
 * (segment overridable via iEffSeg) and advance/retreat rSI by the element
 * size according to EFLAGS.DF. */
#define IEM_LODS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(0, 2); \
        IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
        IEM_MC_LOCAL(RTGCPTR,  uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
        IEM_MC_STORE_GREG_U##ValBits(X86_GREG_xAX, uValue); \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END();
11191
/** Opcode 0xac. */
FNIEMOP_DEF(iemOp_lodsb_AL_Xb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     * (REP and REPNE are treated the same for LODS.)
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep lodsb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("lodsb al,Xb");

    /*
     * Sharing case implementation with lods[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_LODS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_LODS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_LODS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
11225
11226
11227/** Opcode 0xad. */
11228FNIEMOP_DEF(iemOp_lodswd_eAX_Xv)
11229{
11230 IEMOP_HLP_NO_LOCK_PREFIX();
11231
11232 /*
11233 * Use the C implementation if a repeat prefix is encountered.
11234 */
11235 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
11236 {
11237 IEMOP_MNEMONIC("rep lods rAX,Xv");
11238 switch (pIemCpu->enmEffOpSize)
11239 {
11240 case IEMMODE_16BIT:
11241 switch (pIemCpu->enmEffAddrMode)
11242 {
11243 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m16, pIemCpu->iEffSeg);
11244 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m32, pIemCpu->iEffSeg);
11245 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m64, pIemCpu->iEffSeg);
11246 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11247 }
11248 break;
11249 case IEMMODE_32BIT:
11250 switch (pIemCpu->enmEffAddrMode)
11251 {
11252 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m16, pIemCpu->iEffSeg);
11253 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m32, pIemCpu->iEffSeg);
11254 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m64, pIemCpu->iEffSeg);
11255 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11256 }
11257 case IEMMODE_64BIT:
11258 switch (pIemCpu->enmEffAddrMode)
11259 {
11260 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
11261 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m32, pIemCpu->iEffSeg);
11262 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m64, pIemCpu->iEffSeg);
11263 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11264 }
11265 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11266 }
11267 }
11268 IEMOP_MNEMONIC("lods rAX,Xv");
11269
11270 /*
11271 * Annoying double switch here.
11272 * Using ugly macro for implementing the cases, sharing it with lodsb.
11273 */
11274 switch (pIemCpu->enmEffOpSize)
11275 {
11276 case IEMMODE_16BIT:
11277 switch (pIemCpu->enmEffAddrMode)
11278 {
11279 case IEMMODE_16BIT: IEM_LODS_CASE(16, 16); break;
11280 case IEMMODE_32BIT: IEM_LODS_CASE(16, 32); break;
11281 case IEMMODE_64BIT: IEM_LODS_CASE(16, 64); break;
11282 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11283 }
11284 break;
11285
11286 case IEMMODE_32BIT:
11287 switch (pIemCpu->enmEffAddrMode)
11288 {
11289 case IEMMODE_16BIT: IEM_LODS_CASE(32, 16); break;
11290 case IEMMODE_32BIT: IEM_LODS_CASE(32, 32); break;
11291 case IEMMODE_64BIT: IEM_LODS_CASE(32, 64); break;
11292 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11293 }
11294 break;
11295
11296 case IEMMODE_64BIT:
11297 switch (pIemCpu->enmEffAddrMode)
11298 {
11299 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
11300 case IEMMODE_32BIT: IEM_LODS_CASE(64, 32); break;
11301 case IEMMODE_64BIT: IEM_LODS_CASE(64, 64); break;
11302 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11303 }
11304 break;
11305 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11306 }
11307 return VINF_SUCCESS;
11308}
11309
11310#undef IEM_LODS_CASE
11311
/** Macro used by iemOp_scasb_AL_Xb and iemOp_scaswd_eAX_Xv.
 *
 * Emits one non-repeated SCAS step: compare rAX (ValBits wide) against the
 * value at ES:rDI (CMP semantics, rAX unmodified, EFLAGS updated) and
 * advance/retreat rDI by the element size according to EFLAGS.DF. */
#define IEM_SCAS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(3, 2); \
        IEM_MC_ARG(uint##ValBits##_t *, puRax,   0); \
        IEM_MC_ARG(uint##ValBits##_t,   uValue,  1); \
        IEM_MC_ARG(uint32_t *,          pEFlags, 2); \
        IEM_MC_LOCAL(RTGCPTR,           uAddr); \
        \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue, X86_SREG_ES, uAddr); \
        IEM_MC_REF_GREG_U##ValBits(puRax, X86_GREG_xAX); \
        IEM_MC_REF_EFLAGS(pEFlags); \
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puRax, uValue, pEFlags); \
        \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END();
11333
/** Opcode 0xae. */
FNIEMOP_DEF(iemOp_scasb_AL_Xb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     * (Unlike STOS/LODS, SCAS distinguishes REPE from REPNE.)
     */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
    {
        IEMOP_MNEMONIC("repe scasb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
    {
        IEMOP_MNEMONIC("repne scasb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("scasb al,Xb");

    /*
     * Sharing case implementation with scas[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_SCAS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_SCAS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_SCAS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
11378
11379
11380/** Opcode 0xaf. */
11381FNIEMOP_DEF(iemOp_scaswd_eAX_Xv)
11382{
11383 IEMOP_HLP_NO_LOCK_PREFIX();
11384
11385 /*
11386 * Use the C implementation if a repeat prefix is encountered.
11387 */
11388 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
11389 {
11390 IEMOP_MNEMONIC("repe scas rAX,Xv");
11391 switch (pIemCpu->enmEffOpSize)
11392 {
11393 case IEMMODE_16BIT:
11394 switch (pIemCpu->enmEffAddrMode)
11395 {
11396 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m16);
11397 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m32);
11398 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m64);
11399 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11400 }
11401 break;
11402 case IEMMODE_32BIT:
11403 switch (pIemCpu->enmEffAddrMode)
11404 {
11405 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m16);
11406 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m32);
11407 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m64);
11408 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11409 }
11410 case IEMMODE_64BIT:
11411 switch (pIemCpu->enmEffAddrMode)
11412 {
11413 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /** @todo It's this wrong, we can do 16-bit addressing in 64-bit mode, but not 32-bit. right? */
11414 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m32);
11415 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m64);
11416 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11417 }
11418 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11419 }
11420 }
11421 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
11422 {
11423 IEMOP_MNEMONIC("repne scas rAX,Xv");
11424 switch (pIemCpu->enmEffOpSize)
11425 {
11426 case IEMMODE_16BIT:
11427 switch (pIemCpu->enmEffAddrMode)
11428 {
11429 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m16);
11430 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m32);
11431 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m64);
11432 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11433 }
11434 break;
11435 case IEMMODE_32BIT:
11436 switch (pIemCpu->enmEffAddrMode)
11437 {
11438 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m16);
11439 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m32);
11440 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m64);
11441 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11442 }
11443 case IEMMODE_64BIT:
11444 switch (pIemCpu->enmEffAddrMode)
11445 {
11446 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
11447 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_rax_m32);
11448 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_rax_m64);
11449 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11450 }
11451 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11452 }
11453 }
11454 IEMOP_MNEMONIC("scas rAX,Xv");
11455
11456 /*
11457 * Annoying double switch here.
11458 * Using ugly macro for implementing the cases, sharing it with scasb.
11459 */
11460 switch (pIemCpu->enmEffOpSize)
11461 {
11462 case IEMMODE_16BIT:
11463 switch (pIemCpu->enmEffAddrMode)
11464 {
11465 case IEMMODE_16BIT: IEM_SCAS_CASE(16, 16); break;
11466 case IEMMODE_32BIT: IEM_SCAS_CASE(16, 32); break;
11467 case IEMMODE_64BIT: IEM_SCAS_CASE(16, 64); break;
11468 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11469 }
11470 break;
11471
11472 case IEMMODE_32BIT:
11473 switch (pIemCpu->enmEffAddrMode)
11474 {
11475 case IEMMODE_16BIT: IEM_SCAS_CASE(32, 16); break;
11476 case IEMMODE_32BIT: IEM_SCAS_CASE(32, 32); break;
11477 case IEMMODE_64BIT: IEM_SCAS_CASE(32, 64); break;
11478 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11479 }
11480 break;
11481
11482 case IEMMODE_64BIT:
11483 switch (pIemCpu->enmEffAddrMode)
11484 {
11485 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
11486 case IEMMODE_32BIT: IEM_SCAS_CASE(64, 32); break;
11487 case IEMMODE_64BIT: IEM_SCAS_CASE(64, 64); break;
11488 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11489 }
11490 break;
11491 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11492 }
11493 return VINF_SUCCESS;
11494}
11495
11496#undef IEM_SCAS_CASE
11497
11498/**
11499 * Common 'mov r8, imm8' helper.
11500 */
11501FNIEMOP_DEF_1(iemOpCommonMov_r8_Ib, uint8_t, iReg)
11502{
11503 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
11504 IEMOP_HLP_NO_LOCK_PREFIX();
11505
11506 IEM_MC_BEGIN(0, 1);
11507 IEM_MC_LOCAL_CONST(uint8_t, u8Value,/*=*/ u8Imm);
11508 IEM_MC_STORE_GREG_U8(iReg, u8Value);
11509 IEM_MC_ADVANCE_RIP();
11510 IEM_MC_END();
11511
11512 return VINF_SUCCESS;
11513}
11514
11515
/** Opcode 0xb0. */
FNIEMOP_DEF(iemOp_mov_AL_Ib)
{
    IEMOP_MNEMONIC("mov AL,Ib");
    /* With REX.B this selects R8L instead of AL. */
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xAX | pIemCpu->uRexB);
}
11522
11523
/** Opcode 0xb1. */
FNIEMOP_DEF(iemOp_CL_Ib)
{
    IEMOP_MNEMONIC("mov CL,Ib");
    /* With REX.B this selects R9L instead of CL. */
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xCX | pIemCpu->uRexB);
}
11530
11531
/** Opcode 0xb2. */
FNIEMOP_DEF(iemOp_DL_Ib)
{
    IEMOP_MNEMONIC("mov DL,Ib");
    /* With REX.B this selects R10L instead of DL. */
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDX | pIemCpu->uRexB);
}
11538
11539
/** Opcode 0xb3. */
FNIEMOP_DEF(iemOp_BL_Ib)
{
    IEMOP_MNEMONIC("mov BL,Ib");
    /* With REX.B this selects R11L instead of BL. */
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBX | pIemCpu->uRexB);
}
11546
11547
/** Opcode 0xb4. */
FNIEMOP_DEF(iemOp_mov_AH_Ib)
{
    IEMOP_MNEMONIC("mov AH,Ib");
    /* Encoded register 4: AH without a REX prefix, SPL/R12L with one.
       NOTE(review): presumably the U8 register accessors do that remapping
       based on the REX state — confirm in the IEM_MC_STORE_GREG_U8 impl. */
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSP | pIemCpu->uRexB);
}
11554
11555
/** Opcode 0xb5. */
FNIEMOP_DEF(iemOp_CH_Ib)
{
    IEMOP_MNEMONIC("mov CH,Ib");
    /* Encoded register 5: CH without a REX prefix, BPL/R13L with one. */
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBP | pIemCpu->uRexB);
}
11562
11563
/** Opcode 0xb6. */
FNIEMOP_DEF(iemOp_DH_Ib)
{
    IEMOP_MNEMONIC("mov DH,Ib");
    /* Encoded register 6: DH without a REX prefix, SIL/R14L with one. */
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSI | pIemCpu->uRexB);
}
11570
11571
/** Opcode 0xb7. */
FNIEMOP_DEF(iemOp_BH_Ib)
{
    IEMOP_MNEMONIC("mov BH,Ib");
    /* Encoded register 7: BH without a REX prefix, DIL/R15L with one. */
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDI | pIemCpu->uRexB);
}
11578
11579
11580/**
11581 * Common 'mov regX,immX' helper.
11582 */
11583FNIEMOP_DEF_1(iemOpCommonMov_Rv_Iv, uint8_t, iReg)
11584{
11585 switch (pIemCpu->enmEffOpSize)
11586 {
11587 case IEMMODE_16BIT:
11588 {
11589 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
11590 IEMOP_HLP_NO_LOCK_PREFIX();
11591
11592 IEM_MC_BEGIN(0, 1);
11593 IEM_MC_LOCAL_CONST(uint16_t, u16Value,/*=*/ u16Imm);
11594 IEM_MC_STORE_GREG_U16(iReg, u16Value);
11595 IEM_MC_ADVANCE_RIP();
11596 IEM_MC_END();
11597 break;
11598 }
11599
11600 case IEMMODE_32BIT:
11601 {
11602 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
11603 IEMOP_HLP_NO_LOCK_PREFIX();
11604
11605 IEM_MC_BEGIN(0, 1);
11606 IEM_MC_LOCAL_CONST(uint32_t, u32Value,/*=*/ u32Imm);
11607 IEM_MC_STORE_GREG_U32(iReg, u32Value);
11608 IEM_MC_ADVANCE_RIP();
11609 IEM_MC_END();
11610 break;
11611 }
11612 case IEMMODE_64BIT:
11613 {
11614 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm); /* 64-bit immediate! */
11615 IEMOP_HLP_NO_LOCK_PREFIX();
11616
11617 IEM_MC_BEGIN(0, 1);
11618 IEM_MC_LOCAL_CONST(uint64_t, u64Value,/*=*/ u64Imm);
11619 IEM_MC_STORE_GREG_U64(iReg, u64Value);
11620 IEM_MC_ADVANCE_RIP();
11621 IEM_MC_END();
11622 break;
11623 }
11624 }
11625
11626 return VINF_SUCCESS;
11627}
11628
11629
11630/** Opcode 0xb8. */
11631FNIEMOP_DEF(iemOp_eAX_Iv)
11632{
11633 IEMOP_MNEMONIC("mov rAX,IV");
11634 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xAX | pIemCpu->uRexB);
11635}
11636
11637
11638/** Opcode 0xb9. */
11639FNIEMOP_DEF(iemOp_eCX_Iv)
11640{
11641 IEMOP_MNEMONIC("mov rCX,IV");
11642 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xCX | pIemCpu->uRexB);
11643}
11644
11645
11646/** Opcode 0xba. */
11647FNIEMOP_DEF(iemOp_eDX_Iv)
11648{
11649 IEMOP_MNEMONIC("mov rDX,IV");
11650 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDX | pIemCpu->uRexB);
11651}
11652
11653
11654/** Opcode 0xbb. */
11655FNIEMOP_DEF(iemOp_eBX_Iv)
11656{
11657 IEMOP_MNEMONIC("mov rBX,IV");
11658 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBX | pIemCpu->uRexB);
11659}
11660
11661
11662/** Opcode 0xbc. */
11663FNIEMOP_DEF(iemOp_eSP_Iv)
11664{
11665 IEMOP_MNEMONIC("mov rSP,IV");
11666 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSP | pIemCpu->uRexB);
11667}
11668
11669
11670/** Opcode 0xbd. */
11671FNIEMOP_DEF(iemOp_eBP_Iv)
11672{
11673 IEMOP_MNEMONIC("mov rBP,IV");
11674 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBP | pIemCpu->uRexB);
11675}
11676
11677
11678/** Opcode 0xbe. */
11679FNIEMOP_DEF(iemOp_eSI_Iv)
11680{
11681 IEMOP_MNEMONIC("mov rSI,IV");
11682 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSI | pIemCpu->uRexB);
11683}
11684
11685
11686/** Opcode 0xbf. */
11687FNIEMOP_DEF(iemOp_eDI_Iv)
11688{
11689 IEMOP_MNEMONIC("mov rDI,IV");
11690 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDI | pIemCpu->uRexB);
11691}
11692
11693
/** Opcode 0xc0. */
FNIEMOP_DEF(iemOp_Grp2_Eb_Ib)
{
    /* Group 2: shift/rotate Eb by an immediate count; ModR/M.reg selects the
       operation. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,Ib"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,Ib"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,Ib"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,Ib"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,Ib"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,Ib"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,Ib"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE(); /* /6 is undefined in group 2. */
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
    }
    /* OF and AF are undefined for some counts; don't compare them in the verifier. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,   pu8Dst,            0);
        IEM_MC_ARG_CONST(uint8_t,   cShiftArg, cShift, 1);
        IEM_MC_ARG(uint32_t *,  pEFlags,           2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,   pu8Dst,          0);
        IEM_MC_ARG(uint8_t,     cShiftArg,       1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        /* The shift-count immediate follows the ModR/M bytes, hence the
           1-byte displacement hint to the effective-address calculation. */
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEM_MC_ASSIGN(cShiftArg, cShift);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
11752
11753
/** Opcode 0xc1. */
FNIEMOP_DEF(iemOp_Grp2_Ev_Ib)
{
    /* Group 2: shift/rotate Ev by an immediate count; ModR/M.reg selects the
       operation, the effective operand size selects the width worker. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,Ib"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,Ib"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,Ib"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,Ib"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,Ib"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,Ib"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,Ib"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE(); /* /6 is undefined in group 2. */
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
    }
    /* OF and AF are undefined for some counts; don't compare them in the verifier. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEMOP_HLP_NO_LOCK_PREFIX();
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *,      pu16Dst,           0);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg, cShift, 1);
                IEM_MC_ARG(uint32_t *,      pEFlags,           2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *,      pu32Dst,           0);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg, cShift, 1);
                IEM_MC_ARG(uint32_t *,      pEFlags,           2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
                /* 32-bit register writes clear the high half in 64-bit mode;
                   needed here since the worker only touches the low 32 bits. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *,      pu64Dst,           0);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg, cShift, 1);
                IEM_MC_ARG(uint32_t *,      pEFlags,           2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *,  pu16Dst,         0);
                IEM_MC_ARG(uint8_t,     cShiftArg,       1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* The shift-count immediate follows the ModR/M bytes, hence
                   the 1-byte displacement hint. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *,  pu32Dst,         0);
                IEM_MC_ARG(uint8_t,     cShiftArg,       1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *,  pu64Dst,         0);
                IEM_MC_ARG(uint8_t,     cShiftArg,       1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
11890
11891
/** Opcode 0xc2. */
FNIEMOP_DEF(iemOp_retn_Iw)
{
    IEMOP_MNEMONIC("retn Iw");
    /* Iw = number of bytes to release from the stack after popping the
       return address. */
    uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Near return defaults to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, u16Imm);
}
11901
11902
/** Opcode 0xc3. */
FNIEMOP_DEF(iemOp_retn)
{
    IEMOP_MNEMONIC("retn");
    /* Near return defaults to 64-bit operand size in long mode.  Same worker
       as 0xc2, with a zero stack-adjustment immediate. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, 0);
}
11911
11912
/** Opcode 0xc4. */
FNIEMOP_DEF(iemOp_les_Gv_Mp)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
        || (bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_MNEMONIC("2-byte-vex");
        /* The LES instruction is invalid in 64-bit mode.  In legacy and
           compatibility mode it is invalid with MOD=3.
           The use as a VEX prefix is made possible by assigning the inverted
           REX.R to the top MOD bit, and the top bit in the inverted register
           specifier to the bottom MOD bit, thereby effectively limiting 32-bit
           to accessing registers 0..7 in this VEX form. */
        /** @todo VEX: Just use new tables for it. */
        return IEMOP_RAISE_INVALID_OPCODE();
    }
    IEMOP_MNEMONIC("les Gv,Mp");
    /* Load far pointer from memory into ES:Gv via the shared helper. */
    return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_ES, bRm);
}
11933
11934
/** Opcode 0xc5. */
FNIEMOP_DEF(iemOp_lds_Gv_Mp)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
        || (bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_MNEMONIC("3-byte-vex");
        /* The LDS instruction is invalid in 64-bit mode.  In legacy and
           compatibility mode it is invalid with MOD=3.
           The use as a VEX prefix is made possible by assigning the inverted
           REX.R and REX.X to the two MOD bits, since the REX bits are ignored
           outside of 64-bit mode. */
        /** @todo VEX: Just use new tables for it. */
        return IEMOP_RAISE_INVALID_OPCODE();
    }
    IEMOP_MNEMONIC("lds Gv,Mp");
    /* Load far pointer from memory into DS:Gv via the shared helper. */
    return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_DS, bRm);
}
11954
11955
/** Opcode 0xc6. */
FNIEMOP_DEF(iemOp_Grp11_Eb_Ib)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Eb,Ib in this group. */
        return IEMOP_RAISE_INVALID_OPCODE();
    IEMOP_MNEMONIC("mov Eb,Ib");

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEM_MC_BEGIN(0, 0);
        IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Imm);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory access. */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        /* The immediate byte follows the ModR/M bytes, hence the 1-byte
           displacement hint to the effective-address calculation. */
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Imm);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
11987
11988
/** Opcode 0xc7. Group 11: only /0 (MOV Ev,Iz) is defined. */
FNIEMOP_DEF(iemOp_Grp11_Ev_Iz)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Ev,Iz in this group. */
        return IEMOP_RAISE_INVALID_OPCODE();
    IEMOP_MNEMONIC("mov Ev,Iz");

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 0);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 0);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 0);
                /* 64-bit form takes a 32-bit immediate sign-extended to 64 bits. */
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory access. */
        /* The third IEM_MC_CALC_RM_EFF_ADDR argument is the number of immediate
           bytes that still follow (2/4/4) — TODO confirm its exact semantics. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                /* 32-bit immediate sign-extended to 64 bits, as for the register form. */
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
12069
12070
12071
12072
/** Opcode 0xc8. ENTER Iw,Ib — set up a stack frame for a procedure. */
FNIEMOP_DEF(iemOp_enter_Iw_Ib)
{
    IEMOP_MNEMONIC("enter Iw,Ib");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    IEMOP_HLP_NO_LOCK_PREFIX();
    uint16_t cbFrame;        IEM_OPCODE_GET_NEXT_U16(&cbFrame);        /* bytes of local storage */
    uint8_t  u8NestingLevel; IEM_OPCODE_GET_NEXT_U8(&u8NestingLevel);  /* display/nesting level */
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_enter, pIemCpu->enmEffOpSize, cbFrame, u8NestingLevel);
}
12083
12084
12085/** Opcode 0xc9. */
12086FNIEMOP_DEF(iemOp_leave)
12087{
12088 IEMOP_MNEMONIC("retn");
12089 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
12090 IEMOP_HLP_NO_LOCK_PREFIX();
12091 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_leave, pIemCpu->enmEffOpSize);
12092}
12093
12094
/** Opcode 0xca. RETF Iw — far return, releasing Iw extra stack bytes. */
FNIEMOP_DEF(iemOp_retf_Iw)
{
    IEMOP_MNEMONIC("retf Iw");
    uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm); /* bytes to pop after the far return address */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, u16Imm);
}
12104
12105
/** Opcode 0xcb. RETF — far return without immediate (cbPop = 0). */
FNIEMOP_DEF(iemOp_retf)
{
    IEMOP_MNEMONIC("retf");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, 0);
}
12114
12115
/** Opcode 0xcc. INT3 — breakpoint; raises #BP, flagged as the INT3 instruction. */
FNIEMOP_DEF(iemOp_int_3)
{
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, X86_XCPT_BP, true /*fIsBpInstr*/);
}
12121
12122
/** Opcode 0xcd. INT Ib — software interrupt with vector from the immediate. */
FNIEMOP_DEF(iemOp_int_Ib)
{
    uint8_t u8Int; IEM_OPCODE_GET_NEXT_U8(&u8Int); /* interrupt vector number */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, u8Int, false /*fIsBpInstr*/);
}
12129
12130
12131/** Opcode 0xce. */
12132FNIEMOP_DEF(iemOp_into)
12133{
12134 IEM_MC_BEGIN(2, 0);
12135 IEM_MC_ARG_CONST(uint8_t, u8Int, /*=*/ X86_XCPT_OF, 0);
12136 IEM_MC_ARG_CONST(bool, fIsBpInstr, /*=*/ false, 1);
12137 IEM_MC_CALL_CIMPL_2(iemCImpl_int, u8Int, fIsBpInstr);
12138 IEM_MC_END();
12139 return VINF_SUCCESS;
12140}
12141
12142
/** Opcode 0xcf. IRET — interrupt return, operand-size dependent. */
FNIEMOP_DEF(iemOp_iret)
{
    IEMOP_MNEMONIC("iret");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_iret, pIemCpu->enmEffOpSize);
}
12150
12151
/** Opcode 0xd0. Group 2 shift/rotate of a byte operand by a constant 1. */
FNIEMOP_DEF(iemOp_Grp2_Eb_1)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* The /reg field of ModRM selects the operation; /6 is undefined. */
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,1"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,1"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,1"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,1"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,1"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,1"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,1"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
    }
    /* OF/AF may differ from real hardware for some operations; skip in verification. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=*/1, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory: map the byte read/write, operate in place, then commit. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=*/1, 1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
12207
12208
12209
/** Opcode 0xd1. Group 2 shift/rotate of a word/dword/qword operand by 1. */
FNIEMOP_DEF(iemOp_Grp2_Ev_1)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* The /reg field of ModRM selects the operation; /6 is undefined. */
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,1"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,1"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,1"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,1"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,1"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,1"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,1"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
    }
    /* OF/AF may differ from real hardware for some operations; skip in verification. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        IEMOP_HLP_NO_LOCK_PREFIX();
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
                /* 32-bit GPR writes zero the upper half of the 64-bit register. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory: map the operand read/write, operate in place, then commit. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
12339
12340
/** Opcode 0xd2. Group 2 shift/rotate of a byte operand by CL. */
FNIEMOP_DEF(iemOp_Grp2_Eb_CL)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* The /reg field of ModRM selects the operation; /6 is undefined. */
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,CL"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,CL"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,CL"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,CL"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,CL"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,CL"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,CL"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc, grr. */
    }
    /* OF/AF may differ from real hardware for some operations; skip in verification. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t, cShiftArg, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count comes from CL */
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory: map the byte read/write, operate in place, then commit. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t, cShiftArg, 1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count comes from CL */
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
12398
12399
/** Opcode 0xd3. Group 2 shift/rotate of a word/dword/qword operand by CL. */
FNIEMOP_DEF(iemOp_Grp2_Ev_CL)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* The /reg field of ModRM selects the operation; /6 is undefined. */
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,CL"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,CL"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,CL"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,CL"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,CL"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,CL"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,CL"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
    }
    /* OF/AF may differ from real hardware for some operations; skip in verification. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        IEMOP_HLP_NO_LOCK_PREFIX();
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count from CL */
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count from CL */
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
                /* 32-bit GPR writes zero the upper half of the 64-bit register. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count from CL */
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory: map the operand read/write, operate in place, then commit. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count from CL */
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count from CL */
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count from CL */
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
12535
/** Opcode 0xd4. AAM Ib — ASCII adjust AX after multiply (base in the immediate). */
FNIEMOP_DEF(iemOp_aam_Ib)
{
    IEMOP_MNEMONIC("aam Ib");
    uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm); /* divisor base, normally 10 (0x0a) */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT(); /* invalid in 64-bit mode */
    if (!bImm)
        return IEMOP_RAISE_DIVIDE_ERROR(); /* AAM 0 raises #DE (division by zero) */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_aam, bImm);
}
12547
12548
/** Opcode 0xd5. AAD Ib — ASCII adjust AX before division (base in the immediate). */
FNIEMOP_DEF(iemOp_aad_Ib)
{
    IEMOP_MNEMONIC("aad Ib");
    uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm); /* multiplier base, normally 10 (0x0a) */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT(); /* invalid in 64-bit mode */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_aad, bImm);
}
12558
12559
/** Opcode 0xd7. XLAT — AL <- [(e/r)BX + unsigned AL], address size dependent. */
FNIEMOP_DEF(iemOp_xlat)
{
    IEMOP_MNEMONIC("xlat");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* NOTE(review): IEM_MC_BEGIN(2, 0) vs. the two IEM_MC_LOCALs below — confirm
       the arg/local counts are only advisory for these macros. */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t, u8Tmp);
            IEM_MC_LOCAL(uint16_t, u16Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U16(u16Addr, X86_GREG_xAX); /* zero-extended AL is the table index */
            IEM_MC_ADD_GREG_U16_TO_LOCAL(u16Addr, X86_GREG_xBX); /* BX is the table base */
            IEM_MC_FETCH_MEM16_U8(u8Tmp, pIemCpu->iEffSeg, u16Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t, u8Tmp);
            IEM_MC_LOCAL(uint32_t, u32Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U32(u32Addr, X86_GREG_xAX);
            IEM_MC_ADD_GREG_U32_TO_LOCAL(u32Addr, X86_GREG_xBX);
            IEM_MC_FETCH_MEM32_U8(u8Tmp, pIemCpu->iEffSeg, u32Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t, u8Tmp);
            IEM_MC_LOCAL(uint64_t, u64Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U64(u64Addr, X86_GREG_xAX);
            IEM_MC_ADD_GREG_U64_TO_LOCAL(u64Addr, X86_GREG_xBX);
            IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, u64Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
12606
12607
/**
 * Common worker for FPU instructions working on ST0 and STn, and storing the
 * result in ST0.
 *
 * @param   bRm         The ModRM byte; the R/M field selects STn.
 * @param   pfnAImpl    Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_2(iemOpHlpFpu_st0_stN, uint8_t, bRm, PFNIEMAIMPLFPUR80, pfnAImpl)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(3, 1);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    /* Only run the assembly worker when both ST0 and STn hold values;
       otherwise signal stack underflow with ST0 (reg 0) as destination. */
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
        IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
        IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
12638
12639
/**
 * Common worker for FPU instructions working on ST0 and STn, and only affecting
 * flags (FSW); no register is written.
 *
 * @param   bRm         The ModRM byte; the R/M field selects STn.
 * @param   pfnAImpl    Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_2(iemOpHlpFpuNoStore_st0_stN, uint8_t, bRm, PFNIEMAIMPLFPUR80FSW, pfnAImpl)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(3, 1);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    /* Only run the assembly worker when both ST0 and STn hold values;
       underflow uses UINT8_MAX since there is no destination register. */
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
        IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pu16Fsw, pr80Value1, pr80Value2);
        IEM_MC_UPDATE_FSW(u16Fsw);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(UINT8_MAX);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
12670
12671
/**
 * Common worker for FPU instructions working on ST0 and STn, only affecting
 * flags (FSW), and popping the stack when done.
 *
 * @param   bRm         The ModRM byte; the R/M field selects STn.
 * @param   pfnAImpl    Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_2(iemOpHlpFpuNoStore_st0_stN_pop, uint8_t, bRm, PFNIEMAIMPLFPUR80FSW, pfnAImpl)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(3, 1);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    /* Same as iemOpHlpFpuNoStore_st0_stN except the _THEN_POP variants are
       used so the stack is popped on both the success and underflow paths. */
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
        IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pu16Fsw, pr80Value1, pr80Value2);
        IEM_MC_UPDATE_FSW_THEN_POP(u16Fsw);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(UINT8_MAX);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
12702
12703
/** Opcode 0xd8 11/0. FADD ST0,STn — ST0 = ST0 + STn. */
FNIEMOP_DEF_1(iemOp_fadd_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fadd st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fadd_r80_by_r80);
}
12710
12711
/** Opcode 0xd8 11/1. FMUL ST0,STn — ST0 = ST0 * STn. */
FNIEMOP_DEF_1(iemOp_fmul_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fmul st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fmul_r80_by_r80);
}
12718
12719
/** Opcode 0xd8 11/2. FCOM ST0,STn — compare, update FSW condition codes only. */
FNIEMOP_DEF_1(iemOp_fcom_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcom st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpuNoStore_st0_stN, bRm, iemAImpl_fcom_r80_by_r80);
}
12726
12727
/** Opcode 0xd8 11/3. FCOMP ST0,STn — like FCOM but pops; reuses the FCOM worker. */
FNIEMOP_DEF_1(iemOp_fcomp_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcomp st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpuNoStore_st0_stN_pop, bRm, iemAImpl_fcom_r80_by_r80);
}
12734
12735
/** Opcode 0xd8 11/4. FSUB ST0,STn — ST0 = ST0 - STn. */
FNIEMOP_DEF_1(iemOp_fsub_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsub st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fsub_r80_by_r80);
}
12742
12743
/** Opcode 0xd8 11/5. FSUBR ST0,STn — reversed operands: ST0 = STn - ST0. */
FNIEMOP_DEF_1(iemOp_fsubr_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubr st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fsubr_r80_by_r80);
}
12750
12751
/** Opcode 0xd8 11/6. FDIV ST0,STn — ST0 = ST0 / STn. */
FNIEMOP_DEF_1(iemOp_fdiv_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdiv st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fdiv_r80_by_r80);
}
12758
12759
/** Opcode 0xd8 11/7. FDIVR ST0,STn — reversed operands: ST0 = STn / ST0. */
FNIEMOP_DEF_1(iemOp_fdivr_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivr st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fdivr_r80_by_r80);
}
12766
12767
/**
 * Common worker for FPU instructions working on ST0 and an m32r, and storing
 * the result in ST0.
 *
 * @param   bRm         The ModRM byte; encodes the memory operand.
 * @param   pfnAImpl    Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_2(iemOpHlpFpu_st0_m32r, uint8_t, bRm, PFNIEMAIMPLFPUR32, pfnAImpl)
{
    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_LOCAL(RTFLOAT32U, r32Val2);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U, pr32Val2, r32Val2, 2);

    /* Effective address must be calculated before decoding is declared done. */
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R32(r32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    /* Run the worker only if ST0 holds a value; otherwise underflow into ST0. */
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr32Val2);
        IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
12803
12804
/** Opcode 0xd8 !11/0. FADD ST0,m32r — ST0 = ST0 + 32-bit real from memory. */
FNIEMOP_DEF_1(iemOp_fadd_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fadd st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fadd_r80_by_r32);
}
12811
12812
/** Opcode 0xd8 !11/1. FMUL ST0,m32r — ST0 = ST0 * 32-bit real from memory. */
FNIEMOP_DEF_1(iemOp_fmul_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fmul st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fmul_r80_by_r32);
}
12819
12820
/** Opcode 0xd8 !11/2. FCOM ST0,m32r — compare ST0 with a 32-bit real; FSW only. */
FNIEMOP_DEF_1(iemOp_fcom_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcom st0,m32r");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_LOCAL(RTFLOAT32U, r32Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U, pr32Val2, r32Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R32(r32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    /* The _WITH_MEM_OP variants also record the memory operand in the FPU
       data pointer state (FDP/FDS). */
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fcom_r80_by_r32, pu16Fsw, pr80Value1, pr32Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
12853
12854
/** Opcode 0xd8 !11/3. FCOMP ST0,m32r — like FCOM m32r, but pops the stack. */
FNIEMOP_DEF_1(iemOp_fcomp_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcomp st0,m32r");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_LOCAL(RTFLOAT32U, r32Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U, pr32Val2, r32Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R32(r32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    /* Identical to iemOp_fcom_m32r except the _THEN_POP variants are used,
       so the stack is popped on both the success and underflow paths. */
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fcom_r80_by_r32, pu16Fsw, pr80Value1, pr32Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
12887
12888
/** Opcode 0xd8 !11/4.  FSUB ST(0),m32real: ST(0) := ST(0) - m32real. */
FNIEMOP_DEF_1(iemOp_fsub_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsub st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fsub_r80_by_r32);
}


/** Opcode 0xd8 !11/5.  FSUBR ST(0),m32real — reversed operand order. */
FNIEMOP_DEF_1(iemOp_fsubr_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubr st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fsubr_r80_by_r32);
}


/** Opcode 0xd8 !11/6.  FDIV ST(0),m32real: ST(0) := ST(0) / m32real. */
FNIEMOP_DEF_1(iemOp_fdiv_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdiv st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fdiv_r80_by_r32);
}


/** Opcode 0xd8 !11/7.  FDIVR ST(0),m32real — reversed operand order. */
FNIEMOP_DEF_1(iemOp_fdivr_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivr st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fdivr_r80_by_r32);
}
12919
12920
/** Opcode 0xd8.  First x87 escape byte.  Register forms (mod=3) work on
 *  ST(0),ST(i); memory forms work on ST(0) and a 32-bit real operand. */
FNIEMOP_DEF(iemOp_EscF0)
{
    /* Record where this FPU opcode starts in the opcode buffer.
       NOTE(review): presumably consumed by the FOP/FPUIP bookkeeping
       (IEM_MC_UPDATE_FPU_OPCODE_IP and friends) — confirm. */
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register forms, dispatched on ModRM.reg. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fadd_stN, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fmul_stN, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcom_stN, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fcomp_stN, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fsub_stN, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fsubr_stN, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fdiv_stN, bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fdivr_stN, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory forms (m32real), dispatched on ModRM.reg. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fadd_m32r, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fmul_m32r, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcom_m32r, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fcomp_m32r, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fsub_m32r, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fsubr_m32r, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fdiv_m32r, bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fdivr_m32r, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
12958
12959
/** Opcode 0xd9 /0 mem32real
 * FLD m32real — convert the 32-bit real to 80-bit and push it.
 * @sa iemOp_fld_m64r */
FNIEMOP_DEF_1(iemOp_fld_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fld m32r");

    IEM_MC_BEGIN(2, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_LOCAL(RTFLOAT32U, r32Val);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U, pr32Val, r32Val, 1);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R32(r32Val, pIemCpu->iEffSeg, GCPtrEffSrc);

    /* The push target is the current ST(7); it must be free or we overflow. */
    IEM_MC_IF_FPUREG_IS_EMPTY(7)
        IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fld_r32_to_r80, pFpuRes, pr32Val);
        IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
12992
12993
/** Opcode 0xd9 !11/2 mem32real
 * FST m32real — store ST(0) as a 32-bit real without popping. */
FNIEMOP_DEF_1(iemOp_fst_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fst m32r");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PRTFLOAT32U, pr32Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Map the destination for writing up front so faults hit before FPU state changes. */
    IEM_MC_MEM_MAP(pr32Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r32, pu16Fsw, pr32Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr32Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: with the invalid-op exception masked, write the
           indefinite QNaN to memory; otherwise leave memory untouched. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(pr32Dst);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pr32Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13028
13029
/** Opcode 0xd9 !11/3
 * FSTP m32real — store ST(0) as a 32-bit real, then pop. */
FNIEMOP_DEF_1(iemOp_fstp_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fstp m32r");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PRTFLOAT32U, pr32Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Map the destination for writing up front so faults hit before FPU state changes. */
    IEM_MC_MEM_MAP(pr32Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r32, pu16Fsw, pr32Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr32Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: write indefinite QNaN only if #IA is masked. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(pr32Dst);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pr32Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13064
13065
/** Opcode 0xd9 !11/4
 * FLDENV m14/28byte — load the FPU environment; the 14/28-byte layout
 * depends on the effective operand size, handled by iemCImpl_fldenv. */
FNIEMOP_DEF_1(iemOp_fldenv, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fldenv m14/28byte");
    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pIemCpu->iEffSeg, 1);
    IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_CALL_CIMPL_3(iemCImpl_fldenv, enmEffOpSize, iEffSeg, GCPtrEffSrc);
    IEM_MC_END();
    return VINF_SUCCESS;
}
13081
13082
13083/** Opcode 0xd9 !11/5 */
13084FNIEMOP_DEF_1(iemOp_fldcw, uint8_t, bRm)
13085{
13086 IEMOP_MNEMONIC("fldcw m2byte");
13087 IEM_MC_BEGIN(1, 1);
13088 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
13089 IEM_MC_ARG(uint16_t, u16Fsw, 0);
13090 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
13091 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13092 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13093 IEM_MC_FETCH_MEM_U16(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
13094 IEM_MC_CALL_CIMPL_1(iemCImpl_fldcw, u16Fsw);
13095 IEM_MC_END();
13096 return VINF_SUCCESS;
13097}
13098
13099
13100/** Opcode 0xd9 !11/6 */
13101FNIEMOP_DEF_1(iemOp_fnstenv, uint8_t, bRm)
13102{
13103 IEMOP_MNEMONIC("fstenv m14/m28byte");
13104 IEM_MC_BEGIN(3, 0);
13105 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0);
13106 IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pIemCpu->iEffSeg, 1);
13107 IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 2);
13108 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
13109 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13110 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13111 IEM_MC_CALL_CIMPL_3(iemCImpl_fnstenv, enmEffOpSize, iEffSeg, GCPtrEffDst);
13112 IEM_MC_END();
13113 return VINF_SUCCESS;
13114}
13115
13116
13117/** Opcode 0xd9 !11/7 */
13118FNIEMOP_DEF_1(iemOp_fnstcw, uint8_t, bRm)
13119{
13120 IEMOP_MNEMONIC("fnstcw m2byte");
13121 IEM_MC_BEGIN(2, 0);
13122 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
13123 IEM_MC_LOCAL(uint16_t, u16Fcw);
13124 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
13125 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13126 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13127 IEM_MC_FETCH_FCW(u16Fcw);
13128 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Fcw);
13129 IEM_MC_ADVANCE_RIP(); /* C0-C3 are documented as undefined, we leave them unmodified. */
13130 IEM_MC_END();
13131 return VINF_SUCCESS;
13132}
13133
13134
/** Opcode 0xd9 0xd0 (FNOP).
 * NOTE(review): the old comment said 0xd9 0xc9, but D9 C9 is FXCH ST(1);
 * per the Intel SDM the FNOP encoding is D9 D0. */
FNIEMOP_DEF(iemOp_fnop)
{
    IEMOP_MNEMONIC("fnop");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    /** @todo Testcase: looks like FNOP leaves FOP alone but updates FPUIP. Could be
     *        intel optimizations. Investigate. */
    IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP(); /* C0-C3 are documented as undefined, we leave them unmodified. */
    IEM_MC_END();
    return VINF_SUCCESS;
}
13152
13153
/** Opcode 0xd9 11/0 stN
 * FLD ST(i) — push a copy of ST(i) onto the stack. */
FNIEMOP_DEF_1(iemOp_fld_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fld stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    /** @todo Testcase: Check if this raises \#MF? Intel mentioned it not. AMD
     *        indicates that it does. */
    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80Value);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    /* Source register empty => push underflow instead of copying. */
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, bRm & X86_MODRM_RM_MASK)
        IEM_MC_SET_FPU_RESULT(FpuRes, 0 /*FSW*/, pr80Value);
        IEM_MC_PUSH_FPU_RESULT(FpuRes);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_UNDERFLOW();
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();

    return VINF_SUCCESS;
}
13179
13180
/** Opcode 0xd9 11/3 stN
 * FXCH ST(i) — exchange ST(0) and ST(i); clears C1 via the X86_FSW_C1
 * result flag handling below. */
FNIEMOP_DEF_1(iemOp_fxch_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fxch stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    /** @todo Testcase: Check if this raises \#MF? Intel mentioned it not. AMD
     *        indicates that it does. */
    IEM_MC_BEGIN(1, 3);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80Value1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80Value2);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_ARG_CONST(uint8_t, iStReg, /*=*/ bRm & X86_MODRM_RM_MASK, 0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
        IEM_MC_SET_FPU_RESULT(FpuRes, X86_FSW_C1, pr80Value2);
        IEM_MC_STORE_FPUREG_R80_SRC_REF(bRm & X86_MODRM_RM_MASK, pr80Value1);
        IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
    IEM_MC_ELSE()
        /* One (or both) registers empty: defer to the C implementation. */
        IEM_MC_CALL_CIMPL_1(iemCImpl_fxch_underflow, iStReg);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();

    return VINF_SUCCESS;
}
13209
13210
/** Opcode 0xd9 11/4, 0xdd 11/2.
 * FSTP ST(i) — copy ST(0) to ST(i), then pop. */
FNIEMOP_DEF_1(iemOp_fstp_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fstp st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    /* fstp st0, st0 is frequently used as an official 'ffreep st0' sequence. */
    uint8_t const iDstReg = bRm & X86_MODRM_RM_MASK;
    if (!iDstReg)
    {
        /* Destination is ST(0) itself: nothing to copy, just pop. */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL_CONST(uint16_t, u16Fsw, /*=*/ 0);
        IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
        IEM_MC_MAYBE_RAISE_FPU_XCPT();
        IEM_MC_IF_FPUREG_NOT_EMPTY(0)
            IEM_MC_UPDATE_FSW_THEN_POP(u16Fsw);
        IEM_MC_ELSE()
            IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(0);
        IEM_MC_ENDIF();
        IEM_MC_USED_FPU();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(PCRTFLOAT80U, pr80Value);
        IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
        IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
        IEM_MC_MAYBE_RAISE_FPU_XCPT();
        IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
            IEM_MC_SET_FPU_RESULT(FpuRes, 0 /*FSW*/, pr80Value);
            IEM_MC_STORE_FPU_RESULT_THEN_POP(FpuRes, iDstReg);
        IEM_MC_ELSE()
            IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(iDstReg);
        IEM_MC_ENDIF();
        IEM_MC_USED_FPU();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
13253
13254
/**
 * Common worker for FPU instructions working on ST0 and replaces it with the
 * result, i.e. unary operators.
 *
 * Raises \#NM / pending \#MF first; signals stack underflow if ST(0) is empty.
 *
 * @param pfnAImpl Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpFpu_st0, PFNIEMAIMPLFPUR80UNARY, pfnAImpl)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(2, 1);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 1);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_2(pfnAImpl, pFpuRes, pr80Value);
        IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13284
13285
/** Opcode 0xd9 0xe0.  FCHS — negate ST(0). */
FNIEMOP_DEF(iemOp_fchs)
{
    IEMOP_MNEMONIC("fchs st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fchs_r80);
}


/** Opcode 0xd9 0xe1.  FABS — ST(0) := |ST(0)|. */
FNIEMOP_DEF(iemOp_fabs)
{
    IEMOP_MNEMONIC("fabs st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fabs_r80);
}
13300
13301
/**
 * Common worker for FPU instructions working on ST0 and only returns FSW.
 *
 * ST(0) itself is not modified; only the status word is updated.  Signals
 * stack underflow (with no destination register) if ST(0) is empty.
 *
 * @param pfnAImpl Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpFpuNoStore_st0, PFNIEMAIMPLFPUR80UNARYFSW, pfnAImpl)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(2, 1);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 1);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_2(pfnAImpl, pu16Fsw, pr80Value);
        IEM_MC_UPDATE_FSW(u16Fsw);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(UINT8_MAX);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13330
13331
/** Opcode 0xd9 0xe4.  FTST — compare ST(0) with +0.0, updating FSW only. */
FNIEMOP_DEF(iemOp_ftst)
{
    IEMOP_MNEMONIC("ftst st0");
    return FNIEMOP_CALL_1(iemOpHlpFpuNoStore_st0, iemAImpl_ftst_r80);
}


/** Opcode 0xd9 0xe5.  FXAM — classify ST(0), updating FSW only. */
FNIEMOP_DEF(iemOp_fxam)
{
    IEMOP_MNEMONIC("fxam st0");
    return FNIEMOP_CALL_1(iemOpHlpFpuNoStore_st0, iemAImpl_fxam_r80);
}
13346
13347
/**
 * Common worker for FPU instructions pushing a constant onto the FPU stack.
 *
 * Signals push overflow if the register that would become ST(0) — the
 * current ST(7) — is occupied.
 *
 * @param pfnAImpl Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpFpuPushConstant, PFNIEMAIMPLFPUR80LDCONST, pfnAImpl)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(1, 1);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_IF_FPUREG_IS_EMPTY(7)
        IEM_MC_CALL_FPU_AIMPL_1(pfnAImpl, pFpuRes);
        IEM_MC_PUSH_FPU_RESULT(FpuRes);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_OVERFLOW();
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13375
13376
/** Opcode 0xd9 0xe8.  FLD1 — push +1.0. */
FNIEMOP_DEF(iemOp_fld1)
{
    IEMOP_MNEMONIC("fld1");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fld1);
}


/** Opcode 0xd9 0xe9.  FLDL2T — push log2(10). */
FNIEMOP_DEF(iemOp_fldl2t)
{
    IEMOP_MNEMONIC("fldl2t");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldl2t);
}


/** Opcode 0xd9 0xea.  FLDL2E — push log2(e). */
FNIEMOP_DEF(iemOp_fldl2e)
{
    IEMOP_MNEMONIC("fldl2e");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldl2e);
}

/** Opcode 0xd9 0xeb.  FLDPI — push pi. */
FNIEMOP_DEF(iemOp_fldpi)
{
    IEMOP_MNEMONIC("fldpi");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldpi);
}


/** Opcode 0xd9 0xec.  FLDLG2 — push log10(2). */
FNIEMOP_DEF(iemOp_fldlg2)
{
    IEMOP_MNEMONIC("fldlg2");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldlg2);
}

/** Opcode 0xd9 0xed.  FLDLN2 — push ln(2). */
FNIEMOP_DEF(iemOp_fldln2)
{
    IEMOP_MNEMONIC("fldln2");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldln2);
}


/** Opcode 0xd9 0xee.  FLDZ — push +0.0. */
FNIEMOP_DEF(iemOp_fldz)
{
    IEMOP_MNEMONIC("fldz");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldz);
}
13429
13430
/** Opcode 0xd9 0xf0.  F2XM1 — ST(0) := 2^ST(0) - 1. */
FNIEMOP_DEF(iemOp_f2xm1)
{
    IEMOP_MNEMONIC("f2xm1 st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_f2xm1_r80);
}
13437
13438
13439/** Opcode 0xd9 0xf1. */
13440FNIEMOP_DEF(iemOp_fylx2)
13441{
13442 IEMOP_MNEMONIC("fylx2 st0");
13443 return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fyl2x_r80);
13444}
13445
13446
/**
 * Common worker for FPU instructions working on ST0 and having two outputs, one
 * replacing ST0 and one pushed onto the stack.
 *
 * Signals a push-variant underflow (two results) if ST(0) is empty.
 *
 * @param pfnAImpl Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpFpuReplace_st0_push, PFNIEMAIMPLFPUR80UNARYTWO, pfnAImpl)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(2, 1);
    IEM_MC_LOCAL(IEMFPURESULTTWO, FpuResTwo);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULTTWO, pFpuResTwo, FpuResTwo, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 1);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_2(pfnAImpl, pFpuResTwo, pr80Value);
        IEM_MC_PUSH_FPU_RESULT_TWO(FpuResTwo);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO();
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13476
13477
/** Opcode 0xd9 0xf2.  FPTAN — replaces ST(0) and pushes a second result. */
FNIEMOP_DEF(iemOp_fptan)
{
    IEMOP_MNEMONIC("fptan st0");
    return FNIEMOP_CALL_1(iemOpHlpFpuReplace_st0_push, iemAImpl_fptan_r80_r80);
}
13484
13485
/**
 * Common worker for FPU instructions working on STn and ST0, storing the result
 * in STn, and popping the stack unless IE, DE or ZE was raised.
 *
 * @param bRm     The ModRM byte; rm selects ST(n).
 * @param pfnAImpl Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_2(iemOpHlpFpu_stN_st0_pop, uint8_t, bRm, PFNIEMAIMPLFPUR80, pfnAImpl)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(3, 1);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Operand 1 is ST(n), operand 2 is ST(0). */
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, bRm & X86_MODRM_RM_MASK, pr80Value2, 0)
        IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
        IEM_MC_STORE_FPU_RESULT_THEN_POP(FpuRes, bRm & X86_MODRM_RM_MASK);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(bRm & X86_MODRM_RM_MASK);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13517
13518
/** Opcode 0xd9 0xf3.  FPATAN — result into ST(1), then pop. */
FNIEMOP_DEF(iemOp_fpatan)
{
    IEMOP_MNEMONIC("fpatan st1,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, 1, iemAImpl_fpatan_r80_by_r80);
}


/** Opcode 0xd9 0xf4.  FXTRACT — replaces ST(0) and pushes a second result. */
FNIEMOP_DEF(iemOp_fxtract)
{
    IEMOP_MNEMONIC("fxtract st0");
    return FNIEMOP_CALL_1(iemOpHlpFpuReplace_st0_push, iemAImpl_fxtract_r80_r80);
}


/** Opcode 0xd9 0xf5.  FPREM1 — ST(0) partial remainder by ST(1) (IEEE). */
FNIEMOP_DEF(iemOp_fprem1)
{
    IEMOP_MNEMONIC("fprem1 st0, st1");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, 1, iemAImpl_fprem1_r80_by_r80);
}
13541
13542
/** Opcode 0xd9 0xf6.  FDECSTP — decrement the FPU stack top pointer. */
FNIEMOP_DEF(iemOp_fdecstp)
{
    IEMOP_MNEMONIC("fdecstp");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    /* Note! C0, C2 and C3 are documented as undefined, we clear them. */
    /** @todo Testcase: Check whether FOP, FPUIP and FPUCS are affected by
     *        FINCSTP and FDECSTP. */

    IEM_MC_BEGIN(0,0);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Only TOP moves; no register contents or tags change. */
    IEM_MC_FPU_STACK_DEC_TOP();
    IEM_MC_UPDATE_FSW_CONST(0);

    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
13565
13566
/** Opcode 0xd9 0xf7.  FINCSTP — increment the FPU stack top pointer. */
FNIEMOP_DEF(iemOp_fincstp)
{
    IEMOP_MNEMONIC("fincstp");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    /* Note! C0, C2 and C3 are documented as undefined, we clear them. */
    /** @todo Testcase: Check whether FOP, FPUIP and FPUCS are affected by
     *        FINCSTP and FDECSTP. */

    IEM_MC_BEGIN(0,0);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Only TOP moves; no register contents or tags change. */
    IEM_MC_FPU_STACK_INC_TOP();
    IEM_MC_UPDATE_FSW_CONST(0);

    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
13589
13590
/** Opcode 0xd9 0xf8.  FPREM — ST(0) partial remainder by ST(1). */
FNIEMOP_DEF(iemOp_fprem)
{
    IEMOP_MNEMONIC("fprem st0, st1");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, 1, iemAImpl_fprem_r80_by_r80);
}


/** Opcode 0xd9 0xf9.  FYL2XP1 — result into ST(1), then pop. */
FNIEMOP_DEF(iemOp_fyl2xp1)
{
    IEMOP_MNEMONIC("fyl2xp1 st1,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, 1, iemAImpl_fyl2xp1_r80_by_r80);
}
13605
13606
/** Opcode 0xd9 0xfa.  FSQRT — ST(0) := sqrt(ST(0)). */
FNIEMOP_DEF(iemOp_fsqrt)
{
    IEMOP_MNEMONIC("fsqrt st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fsqrt_r80);
}


/** Opcode 0xd9 0xfb.  FSINCOS — replaces ST(0) and pushes a second result. */
FNIEMOP_DEF(iemOp_fsincos)
{
    IEMOP_MNEMONIC("fsincos st0");
    return FNIEMOP_CALL_1(iemOpHlpFpuReplace_st0_push, iemAImpl_fsincos_r80_r80);
}


/** Opcode 0xd9 0xfc.  FRNDINT — round ST(0) to integer. */
FNIEMOP_DEF(iemOp_frndint)
{
    IEMOP_MNEMONIC("frndint st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_frndint_r80);
}


/** Opcode 0xd9 0xfd.  FSCALE — scale ST(0) by ST(1). */
FNIEMOP_DEF(iemOp_fscale)
{
    IEMOP_MNEMONIC("fscale st0, st1");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, 1, iemAImpl_fscale_r80_by_r80);
}


/** Opcode 0xd9 0xfe.  FSIN — ST(0) := sin(ST(0)). */
FNIEMOP_DEF(iemOp_fsin)
{
    IEMOP_MNEMONIC("fsin st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fsin_r80);
}


/** Opcode 0xd9 0xff.  FCOS — ST(0) := cos(ST(0)). */
FNIEMOP_DEF(iemOp_fcos)
{
    IEMOP_MNEMONIC("fcos st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fcos_r80);
}
13653
13654
/** Used by iemOp_EscF1 to dispatch the register forms 0xd9 0xe0 thru 0xd9 0xff;
 *  index is (opcode byte - 0xe0), unassigned encodings map to iemOp_Invalid. */
static const PFNIEMOP g_apfnEscF1_E0toFF[32] =
{
    /* 0xe0 */ iemOp_fchs,
    /* 0xe1 */ iemOp_fabs,
    /* 0xe2 */ iemOp_Invalid,
    /* 0xe3 */ iemOp_Invalid,
    /* 0xe4 */ iemOp_ftst,
    /* 0xe5 */ iemOp_fxam,
    /* 0xe6 */ iemOp_Invalid,
    /* 0xe7 */ iemOp_Invalid,
    /* 0xe8 */ iemOp_fld1,
    /* 0xe9 */ iemOp_fldl2t,
    /* 0xea */ iemOp_fldl2e,
    /* 0xeb */ iemOp_fldpi,
    /* 0xec */ iemOp_fldlg2,
    /* 0xed */ iemOp_fldln2,
    /* 0xee */ iemOp_fldz,
    /* 0xef */ iemOp_Invalid,
    /* 0xf0 */ iemOp_f2xm1,
    /* 0xf1 */ iemOp_fylx2,
    /* 0xf2 */ iemOp_fptan,
    /* 0xf3 */ iemOp_fpatan,
    /* 0xf4 */ iemOp_fxtract,
    /* 0xf5 */ iemOp_fprem1,
    /* 0xf6 */ iemOp_fdecstp,
    /* 0xf7 */ iemOp_fincstp,
    /* 0xf8 */ iemOp_fprem,
    /* 0xf9 */ iemOp_fyl2xp1,
    /* 0xfa */ iemOp_fsqrt,
    /* 0xfb */ iemOp_fsincos,
    /* 0xfc */ iemOp_frndint,
    /* 0xfd */ iemOp_fscale,
    /* 0xfe */ iemOp_fsin,
    /* 0xff */ iemOp_fcos
};
13691
13692
13693/** Opcode 0xd9. */
13694FNIEMOP_DEF(iemOp_EscF1)
13695{
13696 pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
13697 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
13698 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
13699 {
13700 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
13701 {
13702 case 0: return FNIEMOP_CALL_1(iemOp_fld_stN, bRm);
13703 case 1: return FNIEMOP_CALL_1(iemOp_fxch_stN, bRm);
13704 case 2:
13705 if (bRm == 0xc9)
13706 return FNIEMOP_CALL(iemOp_fnop);
13707 return IEMOP_RAISE_INVALID_OPCODE();
13708 case 3: return FNIEMOP_CALL_1(iemOp_fstp_stN, bRm); /* Reserved. Intel behavior seems to be FSTP ST(i) though. */
13709 case 4:
13710 case 5:
13711 case 6:
13712 case 7:
13713 Assert((unsigned)bRm - 0xe0U < RT_ELEMENTS(g_apfnEscF1_E0toFF));
13714 return FNIEMOP_CALL(g_apfnEscF1_E0toFF[bRm - 0xe0]);
13715 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13716 }
13717 }
13718 else
13719 {
13720 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
13721 {
13722 case 0: return FNIEMOP_CALL_1(iemOp_fld_m32r, bRm);
13723 case 1: return IEMOP_RAISE_INVALID_OPCODE();
13724 case 2: return FNIEMOP_CALL_1(iemOp_fst_m32r, bRm);
13725 case 3: return FNIEMOP_CALL_1(iemOp_fstp_m32r, bRm);
13726 case 4: return FNIEMOP_CALL_1(iemOp_fldenv, bRm);
13727 case 5: return FNIEMOP_CALL_1(iemOp_fldcw, bRm);
13728 case 6: return FNIEMOP_CALL_1(iemOp_fnstenv, bRm);
13729 case 7: return FNIEMOP_CALL_1(iemOp_fnstcw, bRm);
13730 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13731 }
13732 }
13733}
13734
13735
/** Opcode 0xda 11/0.
 * FCMOVB — copy ST(i) to ST(0) when CF is set. */
FNIEMOP_DEF_1(iemOp_fcmovb_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovb st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13762
13763
/** Opcode 0xda 11/1.
 * FCMOVE — copy ST(i) to ST(0) when ZF is set. */
FNIEMOP_DEF_1(iemOp_fcmove_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmove st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13790
13791
/** Opcode 0xda 11/2.
 * FCMOVBE — copy ST(i) to ST(0) when CF or ZF is set. */
FNIEMOP_DEF_1(iemOp_fcmovbe_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovbe st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13818
13819
/** Opcode 0xda 11/3.
 * FCMOVU — copy ST(i) to ST(0) when PF is set (unordered). */
FNIEMOP_DEF_1(iemOp_fcmovu_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovu st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13846
13847
/**
 * Common worker for FPU instructions working on ST0 and ST1, only affecting
 * flags, and popping twice when done.
 *
 * @param pfnAImpl Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpFpuNoStore_st0_stN_pop_pop, PFNIEMAIMPLFPUR80FSW, pfnAImpl)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(3, 1);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    /* Operands are fixed: ST(0) and ST(1). */
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, 1)
        IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pu16Fsw, pr80Value1, pr80Value2);
        IEM_MC_UPDATE_FSW_THEN_POP_POP(u16Fsw);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP();
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13878
13879
/** Opcode 0xda 0xe9.  FUCOMPP — unordered compare ST(0),ST(1), pop twice. */
FNIEMOP_DEF(iemOp_fucompp)
{
    IEMOP_MNEMONIC("fucompp st0,stN");
    return FNIEMOP_CALL_1(iemOpHlpFpuNoStore_st0_stN_pop_pop, iemAImpl_fucom_r80_by_r80);
}
13886
13887
/**
 * Common worker for FPU instructions working on ST0 and an m32i, and storing
 * the result in ST0.
 *
 * @param bRm     The ModRM byte (memory form; provides the effective address).
 * @param pfnAImpl Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_2(iemOpHlpFpu_st0_m32i, uint8_t, bRm, PFNIEMAIMPLFPUI32, pfnAImpl)
{
    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_LOCAL(int32_t, i32Val2);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val2, i32Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I32(i32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pi32Val2);
        IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13923
13924
/** Opcode 0xda !11/0. FIADD — add m32int to ST0. */
FNIEMOP_DEF_1(iemOp_fiadd_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fiadd m32i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fiadd_r80_by_i32);
}


/** Opcode 0xda !11/1. FIMUL — multiply ST0 by m32int. */
FNIEMOP_DEF_1(iemOp_fimul_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fimul m32i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fimul_r80_by_i32);
}
13939
13940
/** Opcode 0xda !11/2. FICOM — compare ST0 with m32int, flags only. */
FNIEMOP_DEF_1(iemOp_ficom_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ficom st0,m32i");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t,                  u16Fsw);
    IEM_MC_LOCAL(int32_t,                   i32Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,        pu16Fsw,    u16Fsw,     0);
    IEM_MC_ARG(PCRTFLOAT80U,                pr80Value1,             1);
    IEM_MC_ARG_LOCAL_REF(int32_t const *,   pi32Val2,   i32Val2,    2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I32(i32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_ficom_r80_by_i32, pu16Fsw, pr80Value1, pi32Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        /* UINT8_MAX: no data register is affected by the underflow response. */
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13973
13974
/** Opcode 0xda !11/3. FICOMP — compare ST0 with m32int, flags only, then pop. */
FNIEMOP_DEF_1(iemOp_ficomp_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ficomp st0,m32i");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t,                  u16Fsw);
    IEM_MC_LOCAL(int32_t,                   i32Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,        pu16Fsw,    u16Fsw,     0);
    IEM_MC_ARG(PCRTFLOAT80U,                pr80Value1,             1);
    IEM_MC_ARG_LOCAL_REF(int32_t const *,   pi32Val2,   i32Val2,    2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I32(i32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_ficom_r80_by_i32, pu16Fsw, pr80Value1, pi32Val2);
        /* Like FICOM above, but the stack is popped after the FSW update. */
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14007
14008
/** Opcode 0xda !11/4. FISUB — subtract m32int from ST0. */
FNIEMOP_DEF_1(iemOp_fisub_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisub m32i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fisub_r80_by_i32);
}


/** Opcode 0xda !11/5. FISUBR — reverse subtract: ST0 = m32int - ST0. */
FNIEMOP_DEF_1(iemOp_fisubr_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisubr m32i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fisubr_r80_by_i32);
}


/** Opcode 0xda !11/6. FIDIV — divide ST0 by m32int. */
FNIEMOP_DEF_1(iemOp_fidiv_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fidiv m32i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fidiv_r80_by_i32);
}


/** Opcode 0xda !11/7. FIDIVR — reverse divide: ST0 = m32int / ST0. */
FNIEMOP_DEF_1(iemOp_fidivr_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fidivr m32i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fidivr_r80_by_i32);
}
14039
14040
/** Opcode 0xda. FPU escape group 2: dispatches on ModR/M mod and reg fields. */
FNIEMOP_DEF(iemOp_EscF2)
{
    /* Remember the opcode byte's offset for the FPU FOP/FIP bookkeeping. */
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register form (mod == 3). */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fcmovb_stN,  bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fcmove_stN,  bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcmovbe_stN, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fcmovu_stN,  bRm);
            case 4: return IEMOP_RAISE_INVALID_OPCODE();
            case 5:
                /* Only DA E9 (FUCOMPP) is defined in this row. */
                if (bRm == 0xe9)
                    return FNIEMOP_CALL(iemOp_fucompp);
                return IEMOP_RAISE_INVALID_OPCODE();
            case 6: return IEMOP_RAISE_INVALID_OPCODE();
            case 7: return IEMOP_RAISE_INVALID_OPCODE();
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory form: ST0 op m32int. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fiadd_m32i,  bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fimul_m32i,  bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_ficom_m32i,  bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_ficomp_m32i, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fisub_m32i,  bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fisubr_m32i, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fidiv_m32i,  bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fidivr_m32i, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
14080
14081
/** Opcode 0xdb !11/0. FILD — load m32int, converted to R80, pushing it. */
FNIEMOP_DEF_1(iemOp_fild_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fild m32i");

    IEM_MC_BEGIN(2, 3);
    IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT,              FpuRes);
    IEM_MC_LOCAL(int32_t,                   i32Val);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT,     pFpuRes,    FpuRes, 0);
    IEM_MC_ARG_LOCAL_REF(int32_t const *,   pi32Val,    i32Val, 1);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I32(i32Val, pIemCpu->iEffSeg, GCPtrEffSrc);

    /* Register 7 rel. to TOP is the slot the push lands in; it must be free,
       otherwise the push-overflow response is taken. */
    IEM_MC_IF_FPUREG_IS_EMPTY(7)
        IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fild_i32_to_r80, pFpuRes, pi32Val);
        IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14113
14114
/** Opcode 0xdb !11/1. FISTTP — store ST0 as m32int with truncation, pop. */
FNIEMOP_DEF_1(iemOp_fisttp_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisttp m32i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t,              u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,    pu16Fsw,    u16Fsw, 0);
    IEM_MC_ARG(int32_t *,               pi32Dst,            1);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value,          2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* The destination is mapped for writing up front; the commit variant used
       depends on whether the store succeeded (see FSW check in the macro). */
    IEM_MC_MEM_MAP(pi32Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fistt_r80_to_i32, pu16Fsw, pi32Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi32Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Underflow with a masked IM: store the integer indefinite value. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I32_CONST_BY_REF(pi32Dst, INT32_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi32Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14149
14150
/** Opcode 0xdb !11/2. FIST — store ST0 as m32int (rounding per FCW.RC). */
FNIEMOP_DEF_1(iemOp_fist_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fist m32i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t,              u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,    pu16Fsw,    u16Fsw, 0);
    IEM_MC_ARG(int32_t *,               pi32Dst,            1);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value,          2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pi32Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i32, pu16Fsw, pi32Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi32Dst, IEM_ACCESS_DATA_W, u16Fsw);
        /* No pop — this is the non-popping FIST variant. */
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I32_CONST_BY_REF(pi32Dst, INT32_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi32Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14185
14186
14187/** Opcode 0xdb !11/3. */
14188FNIEMOP_DEF_1(iemOp_fistp_m32i, uint8_t, bRm)
14189{
14190 IEMOP_MNEMONIC("fisttp m32i");
14191 IEM_MC_BEGIN(3, 2);
14192 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
14193 IEM_MC_LOCAL(uint16_t, u16Fsw);
14194 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
14195 IEM_MC_ARG(int32_t *, pi32Dst, 1);
14196 IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
14197
14198 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
14199 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14200 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14201 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14202
14203 IEM_MC_MEM_MAP(pi32Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
14204 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
14205 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i32, pu16Fsw, pi32Dst, pr80Value);
14206 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi32Dst, IEM_ACCESS_DATA_W, u16Fsw);
14207 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
14208 IEM_MC_ELSE()
14209 IEM_MC_IF_FCW_IM()
14210 IEM_MC_STORE_MEM_I32_CONST_BY_REF(pi32Dst, INT32_MIN /* (integer indefinite) */);
14211 IEM_MC_MEM_COMMIT_AND_UNMAP(pi32Dst, IEM_ACCESS_DATA_W);
14212 IEM_MC_ENDIF();
14213 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
14214 IEM_MC_ENDIF();
14215 IEM_MC_USED_FPU();
14216 IEM_MC_ADVANCE_RIP();
14217
14218 IEM_MC_END();
14219 return VINF_SUCCESS;
14220}
14221
14222
/** Opcode 0xdb !11/5. FLD — load m80real, pushing it onto the stack. */
FNIEMOP_DEF_1(iemOp_fld_m80r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fld m80r");

    IEM_MC_BEGIN(2, 3);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
    IEM_MC_LOCAL(RTFLOAT80U,            r80Val);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT80U,  pr80Val,    r80Val, 1);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R80(r80Val, pIemCpu->iEffSeg, GCPtrEffSrc);

    /* Register 7 rel. to TOP is the slot the push lands in; it must be free. */
    IEM_MC_IF_FPUREG_IS_EMPTY(7)
        IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fld_r80_from_r80, pFpuRes, pr80Val);
        IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14254
14255
/** Opcode 0xdb !11/7. FSTP — store ST0 to m80real and pop. */
FNIEMOP_DEF_1(iemOp_fstp_m80r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fstp m80r");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t,              u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,    pu16Fsw,    u16Fsw, 0);
    IEM_MC_ARG(PRTFLOAT80U,             pr80Dst,            1);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value,          2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pr80Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r80, pu16Fsw, pr80Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr80Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Underflow with a masked IM: store the QNaN real indefinite value. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(pr80Dst);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pr80Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14290
14291
/** Opcode 0xdb 11/0. FCMOVNB — copy ST(i) to ST0 if CF is clear. */
FNIEMOP_DEF_1(iemOp_fcmovnb_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovnb st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Both ST(i) and ST0 must be occupied; FOP/FIP is updated regardless of
       whether the condition causes the actual move. */
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_CF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14318
14319
/** Opcode 0xdb 11/1. FCMOVNE — copy ST(i) to ST0 if ZF is clear. */
FNIEMOP_DEF_1(iemOp_fcmovne_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovne st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14346
14347
/** Opcode 0xdb 11/2. FCMOVNBE — copy ST(i) to ST0 if both CF and ZF are clear. */
FNIEMOP_DEF_1(iemOp_fcmovnbe_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovnbe st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_NO_BITS_SET(X86_EFL_CF | X86_EFL_ZF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14374
14375
/** Opcode 0xdb 11/3. Copy ST(i) to ST0 if PF is clear (FCMOVNU in the Intel
 *  manuals; the extra 'n' in the local name/mnemonic looks like an in-house
 *  spelling — kept as-is since the name is referenced by the dispatcher). */
FNIEMOP_DEF_1(iemOp_fcmovnnu_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovnnu st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_PF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14402
14403
/** Opcode 0xdb 0xe0. FNENI — 8087-only interrupt enable; treated as a no-op
 *  here apart from the device-not-available check. */
FNIEMOP_DEF(iemOp_fneni)
{
    IEMOP_MNEMONIC("fneni (8087/ign)");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0,0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0xdb 0xe1. FNDISI — 8087-only interrupt disable; same no-op
 *  treatment as FNENI. */
FNIEMOP_DEF(iemOp_fndisi)
{
    IEMOP_MNEMONIC("fndisi (8087/ign)");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0,0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
14428
14429
/** Opcode 0xdb 0xe2. FNCLEX — clear the FSW exception bits (no-wait form). */
FNIEMOP_DEF(iemOp_fnclex)
{
    IEMOP_MNEMONIC("fnclex");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0,0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_CLEAR_FSW_EX();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0xdb 0xe3. FNINIT — reinitialize the FPU; deferred to the C
 *  implementation with fCheckXcpts=false (no-wait form). */
FNIEMOP_DEF(iemOp_fninit)
{
    IEMOP_MNEMONIC("fninit");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_finit, false /*fCheckXcpts*/);
}
14452
14453
/** Opcode 0xdb 0xe4. FNSETPM — 80287-only; treated as a no-op apart from the
 *  device-not-available check. */
FNIEMOP_DEF(iemOp_fnsetpm)
{
    IEMOP_MNEMONIC("fnsetpm (80287/ign)");   /* set protected mode on fpu. */
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0,0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0xdb 0xe5. FRSTPM — 80287XL-only; the no-op path is compiled out
 *  because newer CPUs raise \#UD for this encoding. */
FNIEMOP_DEF(iemOp_frstpm)
{
    IEMOP_MNEMONIC("frstpm (80287XL/ign)"); /* reset pm, back to real mode. */
#if 0 /* #UDs on newer CPUs */
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0,0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
#else
    return IEMOP_RAISE_INVALID_OPCODE();
#endif
}
14482
14483
/** Opcode 0xdb 11/5. FUCOMI — unordered compare ST0 with ST(i), EFLAGS result,
 *  no pop; deferred to the shared fcomi/fucomi C implementation. */
FNIEMOP_DEF_1(iemOp_fucomi_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fucomi st0,stN");
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_fcomi_fucomi, bRm & X86_MODRM_RM_MASK, iemAImpl_fucomi_r80_by_r80, false /*fPop*/);
}


/** Opcode 0xdb 11/6. FCOMI — ordered compare ST0 with ST(i), EFLAGS result,
 *  no pop. */
FNIEMOP_DEF_1(iemOp_fcomi_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcomi st0,stN");
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_fcomi_fucomi, bRm & X86_MODRM_RM_MASK, iemAImpl_fcomi_r80_by_r80, false /*fPop*/);
}
14498
14499
14500/** Opcode 0xdb. */
14501FNIEMOP_DEF(iemOp_EscF3)
14502{
14503 pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
14504 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
14505 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
14506 {
14507 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
14508 {
14509 case 0: FNIEMOP_CALL_1(iemOp_fcmovnb_stN, bRm);
14510 case 1: FNIEMOP_CALL_1(iemOp_fcmovne_stN, bRm);
14511 case 2: FNIEMOP_CALL_1(iemOp_fcmovnbe_stN, bRm);
14512 case 3: FNIEMOP_CALL_1(iemOp_fcmovnnu_stN, bRm);
14513 case 4:
14514 switch (bRm)
14515 {
14516 case 0xe0: return FNIEMOP_CALL(iemOp_fneni);
14517 case 0xe1: return FNIEMOP_CALL(iemOp_fndisi);
14518 case 0xe2: return FNIEMOP_CALL(iemOp_fnclex);
14519 case 0xe3: return FNIEMOP_CALL(iemOp_fninit);
14520 case 0xe4: return FNIEMOP_CALL(iemOp_fnsetpm);
14521 case 0xe5: return FNIEMOP_CALL(iemOp_frstpm);
14522 case 0xe6: return IEMOP_RAISE_INVALID_OPCODE();
14523 case 0xe7: return IEMOP_RAISE_INVALID_OPCODE();
14524 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14525 }
14526 break;
14527 case 5: return FNIEMOP_CALL_1(iemOp_fucomi_stN, bRm);
14528 case 6: return FNIEMOP_CALL_1(iemOp_fcomi_stN, bRm);
14529 case 7: return IEMOP_RAISE_INVALID_OPCODE();
14530 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14531 }
14532 }
14533 else
14534 {
14535 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
14536 {
14537 case 0: return FNIEMOP_CALL_1(iemOp_fild_m32i, bRm);
14538 case 1: return FNIEMOP_CALL_1(iemOp_fisttp_m32i,bRm);
14539 case 2: return FNIEMOP_CALL_1(iemOp_fist_m32i, bRm);
14540 case 3: return FNIEMOP_CALL_1(iemOp_fistp_m32i, bRm);
14541 case 4: return IEMOP_RAISE_INVALID_OPCODE();
14542 case 5: return FNIEMOP_CALL_1(iemOp_fld_m80r, bRm);
14543 case 6: return IEMOP_RAISE_INVALID_OPCODE();
14544 case 7: return FNIEMOP_CALL_1(iemOp_fstp_m80r, bRm);
14545 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14546 }
14547 }
14548}
14549
14550
14551/**
14552 * Common worker for FPU instructions working on STn and ST0, and storing the
14553 * result in STn unless IE, DE or ZE was raised.
14554 *
14555 * @param pfnAImpl Pointer to the instruction implementation (assembly).
14556 */
14557FNIEMOP_DEF_2(iemOpHlpFpu_stN_st0, uint8_t, bRm, PFNIEMAIMPLFPUR80, pfnAImpl)
14558{
14559 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14560
14561 IEM_MC_BEGIN(3, 1);
14562 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
14563 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
14564 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
14565 IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
14566
14567 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14568 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14569
14570 IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, bRm & X86_MODRM_RM_MASK, pr80Value2, 0)
14571 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
14572 IEM_MC_STORE_FPU_RESULT(FpuRes, bRm & X86_MODRM_RM_MASK);
14573 IEM_MC_ELSE()
14574 IEM_MC_FPU_STACK_UNDERFLOW(bRm & X86_MODRM_RM_MASK);
14575 IEM_MC_ENDIF();
14576 IEM_MC_USED_FPU();
14577 IEM_MC_ADVANCE_RIP();
14578
14579 IEM_MC_END();
14580 return VINF_SUCCESS;
14581}
14582
14583
/** Opcode 0xdc 11/0. FADD — ST(i) = ST(i) + ST0. */
FNIEMOP_DEF_1(iemOp_fadd_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fadd stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fadd_r80_by_r80);
}


/** Opcode 0xdc 11/1. FMUL — ST(i) = ST(i) * ST0. */
FNIEMOP_DEF_1(iemOp_fmul_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fmul stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fmul_r80_by_r80);
}


/** Opcode 0xdc 11/4. FSUBR — note the sub/subr encoding swap relative to the
 *  0xd8 group (same for div/divr below). */
FNIEMOP_DEF_1(iemOp_fsubr_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubr stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fsubr_r80_by_r80);
}


/** Opcode 0xdc 11/5. FSUB stN,st0. */
FNIEMOP_DEF_1(iemOp_fsub_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsub stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fsub_r80_by_r80);
}


/** Opcode 0xdc 11/6. FDIVR stN,st0. */
FNIEMOP_DEF_1(iemOp_fdivr_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivr stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fdivr_r80_by_r80);
}


/** Opcode 0xdc 11/7. FDIV stN,st0. */
FNIEMOP_DEF_1(iemOp_fdiv_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdiv stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fdiv_r80_by_r80);
}
14630
14631
14632/**
14633 * Common worker for FPU instructions working on ST0 and a 64-bit floating point
14634 * memory operand, and storing the result in ST0.
14635 *
14636 * @param pfnAImpl Pointer to the instruction implementation (assembly).
14637 */
14638FNIEMOP_DEF_2(iemOpHlpFpu_ST0_m64r, uint8_t, bRm, PFNIEMAIMPLFPUR64, pfnImpl)
14639{
14640 IEM_MC_BEGIN(3, 3);
14641 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
14642 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
14643 IEM_MC_LOCAL(RTFLOAT64U, r64Factor2);
14644 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
14645 IEM_MC_ARG(PCRTFLOAT80U, pr80Factor1, 1);
14646 IEM_MC_ARG_LOCAL_REF(PRTFLOAT64U, pr64Factor2, r64Factor2, 2);
14647
14648 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
14649 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14650 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14651 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14652
14653 IEM_MC_FETCH_MEM_R64(r64Factor2, pIemCpu->iEffSeg, GCPtrEffSrc);
14654 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Factor1, 0)
14655 IEM_MC_CALL_FPU_AIMPL_3(pfnImpl, pFpuRes, pr80Factor1, pr64Factor2);
14656 IEM_MC_STORE_FPU_RESULT_MEM_OP(FpuRes, 0, pIemCpu->iEffSeg, GCPtrEffSrc);
14657 IEM_MC_ELSE()
14658 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(0, pIemCpu->iEffSeg, GCPtrEffSrc);
14659 IEM_MC_ENDIF();
14660 IEM_MC_USED_FPU();
14661 IEM_MC_ADVANCE_RIP();
14662
14663 IEM_MC_END();
14664 return VINF_SUCCESS;
14665}
14666
14667
/** Opcode 0xdc !11/0. FADD — add m64real to ST0. */
FNIEMOP_DEF_1(iemOp_fadd_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fadd m64r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fadd_r80_by_r64);
}


/** Opcode 0xdc !11/1. FMUL — multiply ST0 by m64real. */
FNIEMOP_DEF_1(iemOp_fmul_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fmul m64r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fmul_r80_by_r64);
}
14682
14683
/** Opcode 0xdc !11/2. FCOM — compare ST0 with m64real, flags only. */
FNIEMOP_DEF_1(iemOp_fcom_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcom st0,m64r");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t,              u16Fsw);
    IEM_MC_LOCAL(RTFLOAT64U,            r64Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,    pu16Fsw,    u16Fsw,  0);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,          1);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT64U,  pr64Val2,   r64Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R64(r64Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fcom_r80_by_r64, pu16Fsw, pr80Value1, pr64Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        /* UINT8_MAX: no data register is affected by the underflow response. */
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14716
14717
/** Opcode 0xdc !11/3. FCOMP — compare ST0 with m64real, flags only, then pop. */
FNIEMOP_DEF_1(iemOp_fcomp_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcomp st0,m64r");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t,              u16Fsw);
    IEM_MC_LOCAL(RTFLOAT64U,            r64Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,    pu16Fsw,    u16Fsw,  0);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,          1);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT64U,  pr64Val2,   r64Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R64(r64Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fcom_r80_by_r64, pu16Fsw, pr80Value1, pr64Val2);
        /* Like FCOM above, but the stack is popped after the FSW update. */
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14750
14751
/** Opcode 0xdc !11/4. FSUB — subtract m64real from ST0. */
FNIEMOP_DEF_1(iemOp_fsub_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsub m64r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fsub_r80_by_r64);
}


/** Opcode 0xdc !11/5. FSUBR — reverse subtract: ST0 = m64real - ST0. */
FNIEMOP_DEF_1(iemOp_fsubr_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubr m64r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fsubr_r80_by_r64);
}


/** Opcode 0xdc !11/6. FDIV — divide ST0 by m64real. */
FNIEMOP_DEF_1(iemOp_fdiv_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdiv m64r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fdiv_r80_by_r64);
}


/** Opcode 0xdc !11/7. FDIVR — reverse divide: ST0 = m64real / ST0. */
FNIEMOP_DEF_1(iemOp_fdivr_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivr m64r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fdivr_r80_by_r64);
}
14782
14783
/** Opcode 0xdc. FPU escape group 4: dispatches on ModR/M mod and reg fields. */
FNIEMOP_DEF(iemOp_EscF4)
{
    /* Remember the opcode byte's offset for the FPU FOP/FIP bookkeeping. */
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register form: stN,st0 with the result going to ST(i). */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fadd_stN_st0,  bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fmul_stN_st0,  bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcom_stN,      bRm); /* Marked reserved, intel behavior is that of FCOM ST(i). */
            case 3: return FNIEMOP_CALL_1(iemOp_fcomp_stN,     bRm); /* Marked reserved, intel behavior is that of FCOMP ST(i). */
            case 4: return FNIEMOP_CALL_1(iemOp_fsubr_stN_st0, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fsub_stN_st0,  bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fdivr_stN_st0, bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fdiv_stN_st0,  bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory form: ST0 op m64real. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fadd_m64r,  bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fmul_m64r,  bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcom_m64r,  bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fcomp_m64r, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fsub_m64r,  bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fsubr_m64r, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fdiv_m64r,  bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fdivr_m64r, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
14820
14821
/** Opcode 0xdd !11/0. FLD — load m64real, converted to R80, pushing it.
 * @sa iemOp_fld_m32r */
FNIEMOP_DEF_1(iemOp_fld_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fld m64r");

    IEM_MC_BEGIN(2, 3);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
    IEM_MC_LOCAL(RTFLOAT64U,            r64Val);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT64U,  pr64Val,    r64Val, 1);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_FETCH_MEM_R64(r64Val, pIemCpu->iEffSeg, GCPtrEffSrc);
    /* Register 7 rel. to TOP is the slot the push lands in; it must be free. */
    IEM_MC_IF_FPUREG_IS_EMPTY(7)
        IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fld_r64_to_r80, pFpuRes, pr64Val);
        IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14853
14854
/** Opcode 0xdd !11/1.
 * FISTTP m64i - store ST(0) to memory as a 64-bit integer using truncation,
 * then pop the register stack. */
FNIEMOP_DEF_1(iemOp_fisttp_m64i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisttp m64i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(int64_t *, pi64Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pi64Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fistt_r80_to_i64, pu16Fsw, pi64Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi64Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: write the integer indefinite only if #IE is masked. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I64_CONST_BY_REF(pi64Dst, INT64_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi64Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14889
14890
/** Opcode 0xdd !11/2.
 * FST m64r - store ST(0) to memory as a 64-bit float (no pop). */
FNIEMOP_DEF_1(iemOp_fst_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fst m64r");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PRTFLOAT64U, pr64Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pr64Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r64, pu16Fsw, pr64Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr64Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: store QNaN only if invalid-operation exceptions are masked. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(pr64Dst);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pr64Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14925
14926
14927
14928
/** Opcode 0xdd !11/3.
 * FSTP m64r - store ST(0) to memory as a 64-bit float and pop the stack. */
FNIEMOP_DEF_1(iemOp_fstp_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fstp m64r");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PRTFLOAT64U, pr64Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pr64Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r64, pu16Fsw, pr64Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr64Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: store QNaN only if invalid-operation exceptions are masked. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(pr64Dst);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pr64Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14963
14964
14965/** Opcode 0xdd !11/0. */
14966FNIEMOP_DEF_1(iemOp_frstor, uint8_t, bRm)
14967{
14968 IEMOP_MNEMONIC("fxrstor m94/108byte");
14969 IEM_MC_BEGIN(3, 0);
14970 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0);
14971 IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pIemCpu->iEffSeg, 1);
14972 IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 2);
14973 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
14974 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14975 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14976 IEM_MC_CALL_CIMPL_3(iemCImpl_frstor, enmEffOpSize, iEffSeg, GCPtrEffSrc);
14977 IEM_MC_END();
14978 return VINF_SUCCESS;
14979}
14980
14981
/** Opcode 0xdd !11/6.
 * FNSAVE m94/108byte - save the complete FPU state to memory and reinitialize
 * the FPU; the memory layout depends on the operand size. */
FNIEMOP_DEF_1(iemOp_fnsave, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fnsave m94/108byte");
    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pIemCpu->iEffSeg, 1);
    IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_CALL_CIMPL_3(iemCImpl_fnsave, enmEffOpSize, iEffSeg, GCPtrEffDst);
    IEM_MC_END();
    return VINF_SUCCESS;

}
14998
/** Opcode 0xdd !11/7.
 * FNSTSW m16 - store the FPU status word to memory without checking for
 * pending FPU exceptions. */
FNIEMOP_DEF_1(iemOp_fnstsw, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fnstsw m16");

    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(uint16_t, u16Tmp);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();

    IEM_MC_FETCH_FSW(u16Tmp);
    IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tmp);
    IEM_MC_ADVANCE_RIP();

/** @todo Debug / drop a hint to the verifier that things may differ
 * from REM. Seen 0x4020 (iem) vs 0x4000 (rem) at 0008:801c6b88 booting
 * NT4SP1. (X86_FSW_PE) */
    IEM_MC_END();
    return VINF_SUCCESS;
}
15022
15023
/** Opcode 0xdd 11/0.
 * FFREE ST(i) - mark the given FPU register as empty in the tag word. */
FNIEMOP_DEF_1(iemOp_ffree_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ffree stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    /* Note! C0, C1, C2 and C3 are documented as undefined, we leave the
       unmodified. */

    IEM_MC_BEGIN(0, 0);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_FPU_STACK_FREE(bRm & X86_MODRM_RM_MASK);
    IEM_MC_UPDATE_FPU_OPCODE_IP();

    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
15045
15046
/** Opcode 0xdd 11/2.
 * FST ST(i) - copy ST(0) into ST(i) without popping. */
FNIEMOP_DEF_1(iemOp_fst_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fst st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80Value);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_SET_FPU_RESULT(FpuRes, 0 /*FSW*/, pr80Value);
        IEM_MC_STORE_FPU_RESULT(FpuRes, bRm & X86_MODRM_RM_MASK);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(bRm & X86_MODRM_RM_MASK);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
15069
15070
15071/** Opcode 0xdd 11/3. */
15072FNIEMOP_DEF_1(iemOp_fucom_stN_st0, uint8_t, bRm)
15073{
15074 IEMOP_MNEMONIC("fcom st0,stN");
15075 return FNIEMOP_CALL_2(iemOpHlpFpuNoStore_st0_stN, bRm, iemAImpl_fucom_r80_by_r80);
15076}
15077
15078
15079/** Opcode 0xdd 11/4. */
15080FNIEMOP_DEF_1(iemOp_fucomp_stN, uint8_t, bRm)
15081{
15082 IEMOP_MNEMONIC("fcomp st0,stN");
15083 return FNIEMOP_CALL_2(iemOpHlpFpuNoStore_st0_stN_pop, bRm, iemAImpl_fucom_r80_by_r80);
15084}
15085
15086
/** Opcode 0xdd.
 * x87 escape opcode 0xdd: dispatches on the ModR/M byte. Register forms
 * (mod == 3) select by the reg field; memory forms decode m64r/state ops. */
FNIEMOP_DEF(iemOp_EscF5)
{
    /* Record where the opcode byte is so the FPU FOP/FIP can be updated. */
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_ffree_stN, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fxch_stN, bRm); /* Reserved, intel behavior is that of XCHG ST(i). */
            case 2: return FNIEMOP_CALL_1(iemOp_fst_stN, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fstp_stN, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fucom_stN_st0,bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fucomp_stN, bRm);
            case 6: return IEMOP_RAISE_INVALID_OPCODE();
            case 7: return IEMOP_RAISE_INVALID_OPCODE();
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fld_m64r, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fisttp_m64i, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fst_m64r, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fstp_m64r, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_frstor, bRm);
            case 5: return IEMOP_RAISE_INVALID_OPCODE();
            case 6: return FNIEMOP_CALL_1(iemOp_fnsave, bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fnstsw, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
15123
15124
/** Opcode 0xde 11/0.
 * FADDP ST(i),ST(0) - add, store in ST(i), then pop. */
FNIEMOP_DEF_1(iemOp_faddp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("faddp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fadd_r80_by_r80);
}
15131
15132
/** Opcode 0xde 11/1.
 * FMULP ST(i),ST(0) - multiply, store in ST(i), then pop. */
FNIEMOP_DEF_1(iemOp_fmulp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fmulp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fmul_r80_by_r80);
}
15139
15140
15141/** Opcode 0xde 0xd9. */
15142FNIEMOP_DEF(iemOp_fcompp)
15143{
15144 IEMOP_MNEMONIC("fucompp st0,stN");
15145 return FNIEMOP_CALL_1(iemOpHlpFpuNoStore_st0_stN_pop_pop, iemAImpl_fcom_r80_by_r80);
15146}
15147
15148
/** Opcode 0xde 11/4.
 * FSUBRP ST(i),ST(0) - reverse subtract, store in ST(i), then pop. */
FNIEMOP_DEF_1(iemOp_fsubrp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubrp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fsubr_r80_by_r80);
}
15155
15156
/** Opcode 0xde 11/5.
 * FSUBP ST(i),ST(0) - subtract, store in ST(i), then pop. */
FNIEMOP_DEF_1(iemOp_fsubp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fsub_r80_by_r80);
}
15163
15164
/** Opcode 0xde 11/6.
 * FDIVRP ST(i),ST(0) - reverse divide, store in ST(i), then pop. */
FNIEMOP_DEF_1(iemOp_fdivrp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivrp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fdivr_r80_by_r80);
}
15171
15172
/** Opcode 0xde 11/7.
 * FDIVP ST(i),ST(0) - divide, store in ST(i), then pop. */
FNIEMOP_DEF_1(iemOp_fdivp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fdiv_r80_by_r80);
}
15179
15180
15181/**
15182 * Common worker for FPU instructions working on ST0 and an m16i, and storing
15183 * the result in ST0.
15184 *
15185 * @param pfnAImpl Pointer to the instruction implementation (assembly).
15186 */
15187FNIEMOP_DEF_2(iemOpHlpFpu_st0_m16i, uint8_t, bRm, PFNIEMAIMPLFPUI16, pfnAImpl)
15188{
15189 IEM_MC_BEGIN(3, 3);
15190 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
15191 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
15192 IEM_MC_LOCAL(int16_t, i16Val2);
15193 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
15194 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
15195 IEM_MC_ARG_LOCAL_REF(int16_t const *, pi16Val2, i16Val2, 2);
15196
15197 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
15198 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
15199
15200 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
15201 IEM_MC_MAYBE_RAISE_FPU_XCPT();
15202 IEM_MC_FETCH_MEM_I16(i16Val2, pIemCpu->iEffSeg, GCPtrEffSrc);
15203
15204 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
15205 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pi16Val2);
15206 IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
15207 IEM_MC_ELSE()
15208 IEM_MC_FPU_STACK_UNDERFLOW(0);
15209 IEM_MC_ENDIF();
15210 IEM_MC_USED_FPU();
15211 IEM_MC_ADVANCE_RIP();
15212
15213 IEM_MC_END();
15214 return VINF_SUCCESS;
15215}
15216
15217
/** Opcode 0xde !11/0.
 * FIADD m16i - add a 16-bit memory integer to ST(0). */
FNIEMOP_DEF_1(iemOp_fiadd_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fiadd m16i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fiadd_r80_by_i16);
}
15224
15225
/** Opcode 0xde !11/1.
 * FIMUL m16i - multiply ST(0) by a 16-bit memory integer. */
FNIEMOP_DEF_1(iemOp_fimul_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fimul m16i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fimul_r80_by_i16);
}
15232
15233
/** Opcode 0xde !11/2.
 * FICOM m16i - compare ST(0) with a 16-bit memory integer; only FSW changes. */
FNIEMOP_DEF_1(iemOp_ficom_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ficom st0,m16i");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_LOCAL(int16_t, i16Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(int16_t const *, pi16Val2, i16Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I16(i16Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_ficom_r80_by_i16, pu16Fsw, pr80Value1, pi16Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15266
15267
/** Opcode 0xde !11/3.
 * FICOMP m16i - compare ST(0) with a 16-bit memory integer, then pop. */
FNIEMOP_DEF_1(iemOp_ficomp_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ficomp st0,m16i");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_LOCAL(int16_t, i16Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(int16_t const *, pi16Val2, i16Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I16(i16Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_ficom_r80_by_i16, pu16Fsw, pr80Value1, pi16Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15300
15301
/** Opcode 0xde !11/4.
 * FISUB m16i - subtract a 16-bit memory integer from ST(0). */
FNIEMOP_DEF_1(iemOp_fisub_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisub m16i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fisub_r80_by_i16);
}
15308
15309
/** Opcode 0xde !11/5.
 * FISUBR m16i - reverse subtract: ST(0) = m16i - ST(0). */
FNIEMOP_DEF_1(iemOp_fisubr_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisubr m16i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fisubr_r80_by_i16);
}
15316
15317
15318/** Opcode 0xde !11/6. */
15319FNIEMOP_DEF_1(iemOp_fidiv_m16i, uint8_t, bRm)
15320{
15321 IEMOP_MNEMONIC("fiadd m16i");
15322 return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fidiv_r80_by_i16);
15323}
15324
15325
15326/** Opcode 0xde !11/7. */
15327FNIEMOP_DEF_1(iemOp_fidivr_m16i, uint8_t, bRm)
15328{
15329 IEMOP_MNEMONIC("fiadd m16i");
15330 return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fidivr_r80_by_i16);
15331}
15332
15333
/** Opcode 0xde.
 * x87 escape opcode 0xde: register forms are the *P (pop) arithmetic ops
 * plus FCOMPP at 0xd9; memory forms take a 16-bit integer operand. */
FNIEMOP_DEF(iemOp_EscF6)
{
    /* Record where the opcode byte is so the FPU FOP/FIP can be updated. */
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_faddp_stN_st0, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fmulp_stN_st0, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcomp_stN, bRm);
            case 3: if (bRm == 0xd9)
                        return FNIEMOP_CALL(iemOp_fcompp);
                    return IEMOP_RAISE_INVALID_OPCODE();
            case 4: return FNIEMOP_CALL_1(iemOp_fsubrp_stN_st0, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fsubp_stN_st0, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fdivrp_stN_st0, bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fdivp_stN_st0, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fiadd_m16i, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fimul_m16i, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_ficom_m16i, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_ficomp_m16i, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fisub_m16i, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fisubr_m16i, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fidiv_m16i, bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fidivr_m16i, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
15372
15373
/** Opcode 0xdf 11/0.
 * Undocumented instruction, assumed to work like ffree + fincstp. */
FNIEMOP_DEF_1(iemOp_ffreep_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ffreep stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 0);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_FPU_STACK_FREE(bRm & X86_MODRM_RM_MASK);
    IEM_MC_FPU_STACK_INC_TOP(); /* The popping part: advance TOP past the freed register. */
    IEM_MC_UPDATE_FPU_OPCODE_IP();

    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
15395
15396
/** Opcode 0xdf 0xe0.
 * FNSTSW AX - copy the FPU status word into AX without checking for pending
 * FPU exceptions. */
FNIEMOP_DEF(iemOp_fnstsw_ax)
{
    IEMOP_MNEMONIC("fnstsw ax");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Tmp);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_FETCH_FSW(u16Tmp);
    IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
15412
15413
15414/** Opcode 0xdf 11/5. */
15415FNIEMOP_DEF_1(iemOp_fucomip_st0_stN, uint8_t, bRm)
15416{
15417 IEMOP_MNEMONIC("fcomip st0,stN");
15418 return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_fcomi_fucomi, bRm & X86_MODRM_RM_MASK, iemAImpl_fcomi_r80_by_r80, true /*fPop*/);
15419}
15420
15421
/** Opcode 0xdf 11/6.
 * FCOMIP ST(0),ST(i) - ordered compare into EFLAGS, then pop. */
FNIEMOP_DEF_1(iemOp_fcomip_st0_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcomip st0,stN");
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_fcomi_fucomi, bRm & X86_MODRM_RM_MASK, iemAImpl_fcomi_r80_by_r80, true /*fPop*/);
}
15428
15429
/** Opcode 0xdf !11/0.
 * FILD m16i - not yet implemented (stub). */
FNIEMOP_STUB_1(iemOp_fild_m16i, uint8_t, bRm);
15432
15433
/** Opcode 0xdf !11/1.
 * FISTTP m16i - store ST(0) to memory as a 16-bit integer using truncation,
 * then pop the register stack. */
FNIEMOP_DEF_1(iemOp_fisttp_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisttp m16i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(int16_t *, pi16Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pi16Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fistt_r80_to_i16, pu16Fsw, pi16Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi16Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: write the integer indefinite only if #IE is masked. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I16_CONST_BY_REF(pi16Dst, INT16_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi16Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15468
15469
15470/** Opcode 0xdf !11/2. */
15471FNIEMOP_DEF_1(iemOp_fist_m16i, uint8_t, bRm)
15472{
15473 IEMOP_MNEMONIC("fistp m16i");
15474 IEM_MC_BEGIN(3, 2);
15475 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
15476 IEM_MC_LOCAL(uint16_t, u16Fsw);
15477 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
15478 IEM_MC_ARG(int16_t *, pi16Dst, 1);
15479 IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
15480
15481 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
15482 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
15483 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
15484 IEM_MC_MAYBE_RAISE_FPU_XCPT();
15485
15486 IEM_MC_MEM_MAP(pi16Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
15487 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
15488 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i16, pu16Fsw, pi16Dst, pr80Value);
15489 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi16Dst, IEM_ACCESS_DATA_W, u16Fsw);
15490 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
15491 IEM_MC_ELSE()
15492 IEM_MC_IF_FCW_IM()
15493 IEM_MC_STORE_MEM_I16_CONST_BY_REF(pi16Dst, INT16_MIN /* (integer indefinite) */);
15494 IEM_MC_MEM_COMMIT_AND_UNMAP(pi16Dst, IEM_ACCESS_DATA_W);
15495 IEM_MC_ENDIF();
15496 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
15497 IEM_MC_ENDIF();
15498 IEM_MC_USED_FPU();
15499 IEM_MC_ADVANCE_RIP();
15500
15501 IEM_MC_END();
15502 return VINF_SUCCESS;
15503}
15504
15505
/** Opcode 0xdf !11/3.
 * FISTP m16i - store ST(0) to memory as a 16-bit integer and pop. */
FNIEMOP_DEF_1(iemOp_fistp_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fistp m16i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(int16_t *, pi16Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pi16Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i16, pu16Fsw, pi16Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi16Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: write the integer indefinite only if #IE is masked. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I16_CONST_BY_REF(pi16Dst, INT16_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi16Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15540
15541
/** Opcode 0xdf !11/4.
 * FBLD m80bcd - not yet implemented (stub). */
FNIEMOP_STUB_1(iemOp_fbld_m80d, uint8_t, bRm);

/** Opcode 0xdf !11/5.
 * FILD m64i - not yet implemented (stub). */
FNIEMOP_STUB_1(iemOp_fild_m64i, uint8_t, bRm);

/** Opcode 0xdf !11/6.
 * FBSTP m80bcd - not yet implemented (stub). */
FNIEMOP_STUB_1(iemOp_fbstp_m80d, uint8_t, bRm);
15550
15551
/** Opcode 0xdf !11/7.
 * FISTP m64i - store ST(0) to memory as a 64-bit integer and pop. */
FNIEMOP_DEF_1(iemOp_fistp_m64i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fistp m64i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(int64_t *, pi64Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pi64Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i64, pu16Fsw, pi64Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi64Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: write the integer indefinite only if #IE is masked. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I64_CONST_BY_REF(pi64Dst, INT64_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi64Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15586
15587
/** Opcode 0xdf.
 * x87 escape opcode 0xdf: dispatches on the ModR/M byte.
 * NOTE(review): unlike iemOp_EscF5/iemOp_EscF6 this does not record
 * pIemCpu->offFpuOpcode — verify whether that omission is intentional. */
FNIEMOP_DEF(iemOp_EscF7)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_ffreep_stN, bRm); /* ffree + pop afterwards, since forever according to AMD. */
            case 1: return FNIEMOP_CALL_1(iemOp_fxch_stN, bRm); /* Reserved, behaves like FXCH ST(i) on intel. */
            case 2: return FNIEMOP_CALL_1(iemOp_fstp_stN, bRm); /* Reserved, behaves like FSTP ST(i) on intel. */
            case 3: return FNIEMOP_CALL_1(iemOp_fstp_stN, bRm); /* Reserved, behaves like FSTP ST(i) on intel. */
            case 4: if (bRm == 0xe0)
                        return FNIEMOP_CALL(iemOp_fnstsw_ax);
                    return IEMOP_RAISE_INVALID_OPCODE();
            case 5: return FNIEMOP_CALL_1(iemOp_fucomip_st0_stN, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fcomip_st0_stN, bRm);
            case 7: return IEMOP_RAISE_INVALID_OPCODE();
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fild_m16i, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fisttp_m16i, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fist_m16i, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fistp_m16i, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fbld_m80d, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fild_m64i, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fbstp_m80d, bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fistp_m64i, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
15625
15626
/** Opcode 0xe0.
 * LOOPNE/LOOPNZ Jb - decrement the count register (CX/ECX/RCX per the
 * effective address size) and take the short jump while it is non-zero and
 * ZF is clear. */
FNIEMOP_DEF(iemOp_loopne_Jb)
{
    IEMOP_MNEMONIC("loopne Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
            IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
            IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
            IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
15673
15674
/** Opcode 0xe1.
 * LOOPE/LOOPZ Jb - decrement the count register and take the short jump
 * while it is non-zero and ZF is set. */
FNIEMOP_DEF(iemOp_loope_Jb)
{
    IEMOP_MNEMONIC("loope Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
            IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
            IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
            IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
15721
15722
/** Opcode 0xe2.
 * LOOP Jb - decrement the count register and take the short jump while it is
 * non-zero; EFLAGS are not consulted or modified. */
FNIEMOP_DEF(iemOp_loop_Jb)
{
    IEMOP_MNEMONIC("loop Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    /** @todo Check out the #GP case if EIP < CS.Base or EIP > CS.Limit when
     * using the 32-bit operand size override.  How can that be restarted?  See
     * weird pseudo code in intel manual. */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
            IEM_MC_IF_CX_IS_NZ() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
            IEM_MC_IF_ECX_IS_NZ() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
            IEM_MC_IF_RCX_IS_NZ() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
15772
15773
/** Opcode 0xe3.
 * JCXZ/JECXZ/JRCXZ Jb - take the short jump when the count register (width
 * selected by the effective address size) is zero; no registers modified. */
FNIEMOP_DEF(iemOp_jecxz_Jb)
{
    IEMOP_MNEMONIC("jecxz Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_IF_CX_IS_NZ() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ELSE() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_IF_ECX_IS_NZ() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ELSE() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_IF_RCX_IS_NZ() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ELSE() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
15817
15818
15819/** Opcode 0xe4 */
15820FNIEMOP_DEF(iemOp_in_AL_Ib)
15821{
15822 IEMOP_MNEMONIC("in eAX,Ib");
15823 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
15824 IEMOP_HLP_NO_LOCK_PREFIX();
15825 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, 1);
15826}
15827
15828
/** Opcode 0xe5 - IN eAX, imm8.
 *
 * Word/dword input from an immediate port; the access size (2 or 4) is
 * picked from the effective operand size and passed to iemCImpl_in. */
FNIEMOP_DEF(iemOp_in_eAX_Ib)
{
    IEMOP_MNEMONIC("in eAX,Ib");
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
15837
15838
/** Opcode 0xe6 - OUT imm8, AL.
 *
 * Byte output of AL to an immediate port via the common iemCImpl_out
 * worker (access size 1). */
FNIEMOP_DEF(iemOp_out_Ib_AL)
{
    IEMOP_MNEMONIC("out Ib,AL");
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, 1);
}
15847
15848
/** Opcode 0xe7 - OUT imm8, eAX.
 *
 * Word/dword output to an immediate port; access size (2 or 4) follows
 * the effective operand size. */
FNIEMOP_DEF(iemOp_out_Ib_eAX)
{
    IEMOP_MNEMONIC("out Ib,eAX");
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
15857
15858
/** Opcode 0xe8 - CALL rel16/rel32.
 *
 * Near relative call.  The immediate width follows the effective operand
 * size; in 64-bit mode the displacement is a sign-extended 32-bit value
 * (there is no rel64 encoding).  The stack push and RIP update are done
 * by the iemCImpl_call_rel_* workers. */
FNIEMOP_DEF(iemOp_call_Jv)
{
    IEMOP_MNEMONIC("call Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_16, (int16_t)u16Imm);
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_32, (int32_t)u32Imm);
        }

        case IEMMODE_64BIT:
        {
            /* rel32 sign-extended to 64 bits. */
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_64, u64Imm);
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
15887
15888
/** Opcode 0xe9 - JMP rel16/rel32.
 *
 * Near relative jump.  64-bit mode shares the 32-bit case since the
 * displacement is rel32 either way (IEM_MC_REL_JMP_S32 handles the
 * sign-extension to RIP). */
FNIEMOP_DEF(iemOp_jmp_Jv)
{
    IEMOP_MNEMONIC("jmp Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
            IEM_MC_BEGIN(0, 0);
            IEM_MC_REL_JMP_S16(i16Imm);
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        case IEMMODE_32BIT:
        {
            int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
            IEM_MC_BEGIN(0, 0);
            IEM_MC_REL_JMP_S32(i32Imm);
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
15918
15919
/** Opcode 0xea - JMP ptr16:16/ptr16:32 (far, direct).
 *
 * Invalid in 64-bit mode (IEMOP_HLP_NO_64BIT raises \#UD).  The offset is
 * decoded first (16 or 32 bits per operand size, zero-extended to 32),
 * then the 16-bit selector, matching the instruction's encoding order. */
FNIEMOP_DEF(iemOp_jmp_Ap)
{
    IEMOP_MNEMONIC("jmp Ap");
    IEMOP_HLP_NO_64BIT();

    /* Decode the far pointer address and pass it on to the far call C implementation. */
    uint32_t offSeg;
    if (pIemCpu->enmEffOpSize != IEMMODE_16BIT)
        IEM_OPCODE_GET_NEXT_U32(&offSeg);
    else
        IEM_OPCODE_GET_NEXT_U16_ZX_U32(&offSeg);
    uint16_t uSel;  IEM_OPCODE_GET_NEXT_U16(&uSel);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_FarJmp, uSel, offSeg, pIemCpu->enmEffOpSize);
}
15936
15937
/** Opcode 0xeb - JMP rel8 (short jump). */
FNIEMOP_DEF(iemOp_jmp_Jb)
{
    IEMOP_MNEMONIC("jmp Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_REL_JMP_S8(i8Imm);
    IEM_MC_END();
    return VINF_SUCCESS;
}
15951
15952
/** Opcode 0xec - IN AL, DX (byte input from the port in DX). */
FNIEMOP_DEF(iemOp_in_AL_DX)
{
    IEMOP_MNEMONIC("in AL,DX");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, 1);
}
15960
15961
/** Opcode 0xed - IN eAX, DX.
 * @note  The function name is missing the "in_" part (should read
 *        iemOp_in_eAX_DX); renaming would require touching the opcode
 *        dispatch table elsewhere in this file. */
FNIEMOP_DEF(iemOp_eAX_DX)
{
    IEMOP_MNEMONIC("in eAX,DX");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
15969
15970
/** Opcode 0xee - OUT DX, AL (byte output to the port in DX). */
FNIEMOP_DEF(iemOp_out_DX_AL)
{
    IEMOP_MNEMONIC("out DX,AL");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, 1);
}
15978
15979
/** Opcode 0xef - OUT DX, eAX (word/dword output per operand size). */
FNIEMOP_DEF(iemOp_out_DX_eAX)
{
    IEMOP_MNEMONIC("out DX,eAX");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
15987
15988
/** Opcode 0xf0 - LOCK prefix.
 *
 * Records the prefix in fPrefixes and restarts decoding with the next
 * opcode byte.  Whether LOCK is legal is decided by the final
 * instruction handler (via IEMOP_HLP_NO_LOCK_PREFIX etc.). */
FNIEMOP_DEF(iemOp_lock)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("lock");
    pIemCpu->fPrefixes |= IEM_OP_PRF_LOCK;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
15998
15999
/** Opcode 0xf2 - REPNE/REPNZ prefix.
 *
 * Clears any earlier REPE bit (the last F2/F3 prefix wins), sets REPNZ,
 * and continues decoding with the next opcode byte. */
FNIEMOP_DEF(iemOp_repne)
{
    /* This overrides any previous REPE prefix. */
    pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPZ;
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("repne");
    pIemCpu->fPrefixes |= IEM_OP_PRF_REPNZ;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
16011
16012
/** Opcode 0xf3 - REP/REPE/REPZ prefix.
 *
 * Mirror image of iemOp_repne: clears REPNZ, sets REPZ, continues
 * decoding with the next opcode byte. */
FNIEMOP_DEF(iemOp_repe)
{
    /* This overrides any previous REPNE prefix. */
    pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPNZ;
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("repe");
    pIemCpu->fPrefixes |= IEM_OP_PRF_REPZ;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
16024
16025
16026/** Opcode 0xf4. */
16027FNIEMOP_DEF(iemOp_hlt)
16028{
16029 IEMOP_HLP_NO_LOCK_PREFIX();
16030 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
16031}
16032
16033
/** Opcode 0xf5 - CMC (complement carry flag). */
FNIEMOP_DEF(iemOp_cmc)
{
    IEMOP_MNEMONIC("cmc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_FLIP_EFL_BIT(X86_EFL_CF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
16045
16046
16047/**
16048 * Common implementation of 'inc/dec/not/neg Eb'.
16049 *
16050 * @param bRm The RM byte.
16051 * @param pImpl The instruction implementation.
16052 */
16053FNIEMOP_DEF_2(iemOpCommonUnaryEb, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
16054{
16055 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
16056 {
16057 /* register access */
16058 IEM_MC_BEGIN(2, 0);
16059 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
16060 IEM_MC_ARG(uint32_t *, pEFlags, 1);
16061 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
16062 IEM_MC_REF_EFLAGS(pEFlags);
16063 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
16064 IEM_MC_ADVANCE_RIP();
16065 IEM_MC_END();
16066 }
16067 else
16068 {
16069 /* memory access. */
16070 IEM_MC_BEGIN(2, 2);
16071 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
16072 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
16073 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
16074
16075 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
16076 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
16077 IEM_MC_FETCH_EFLAGS(EFlags);
16078 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
16079 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
16080 else
16081 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU8, pu8Dst, pEFlags);
16082
16083 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
16084 IEM_MC_COMMIT_EFLAGS(EFlags);
16085 IEM_MC_ADVANCE_RIP();
16086 IEM_MC_END();
16087 }
16088 return VINF_SUCCESS;
16089}
16090
16091
16092/**
16093 * Common implementation of 'inc/dec/not/neg Ev'.
16094 *
16095 * @param bRm The RM byte.
16096 * @param pImpl The instruction implementation.
16097 */
16098FNIEMOP_DEF_2(iemOpCommonUnaryEv, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
16099{
16100 /* Registers are handled by a common worker. */
16101 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
16102 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, pImpl, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
16103
16104 /* Memory we do here. */
16105 switch (pIemCpu->enmEffOpSize)
16106 {
16107 case IEMMODE_16BIT:
16108 IEM_MC_BEGIN(2, 2);
16109 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
16110 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
16111 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
16112
16113 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
16114 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
16115 IEM_MC_FETCH_EFLAGS(EFlags);
16116 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
16117 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
16118 else
16119 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU16, pu16Dst, pEFlags);
16120
16121 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
16122 IEM_MC_COMMIT_EFLAGS(EFlags);
16123 IEM_MC_ADVANCE_RIP();
16124 IEM_MC_END();
16125 return VINF_SUCCESS;
16126
16127 case IEMMODE_32BIT:
16128 IEM_MC_BEGIN(2, 2);
16129 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
16130 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
16131 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
16132
16133 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
16134 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
16135 IEM_MC_FETCH_EFLAGS(EFlags);
16136 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
16137 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
16138 else
16139 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU32, pu32Dst, pEFlags);
16140
16141 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
16142 IEM_MC_COMMIT_EFLAGS(EFlags);
16143 IEM_MC_ADVANCE_RIP();
16144 IEM_MC_END();
16145 return VINF_SUCCESS;
16146
16147 case IEMMODE_64BIT:
16148 IEM_MC_BEGIN(2, 2);
16149 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
16150 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
16151 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
16152
16153 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
16154 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
16155 IEM_MC_FETCH_EFLAGS(EFlags);
16156 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
16157 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
16158 else
16159 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU64, pu64Dst, pEFlags);
16160
16161 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
16162 IEM_MC_COMMIT_EFLAGS(EFlags);
16163 IEM_MC_ADVANCE_RIP();
16164 IEM_MC_END();
16165 return VINF_SUCCESS;
16166
16167 IEM_NOT_REACHED_DEFAULT_CASE_RET();
16168 }
16169}
16170
16171
/** Opcode 0xf6 /0 - TEST Eb, Ib.
 *
 * TEST only reads the destination, so the memory path maps it
 * IEM_ACCESS_DATA_R (no write-back of the operand, only EFLAGS). */
FNIEMOP_DEF_1(iemOp_grp3_test_Eb, uint8_t, bRm)
{
    IEMOP_MNEMONIC("test Eb,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,       pu8Dst,                 0);
        IEM_MC_ARG_CONST(uint8_t,   u8Src,/*=*/u8Imm,      1);
        IEM_MC_ARG(uint32_t *,      pEFlags,                2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory access. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,       pu8Dst,          0);
        IEM_MC_ARG(uint8_t,         u8Src,           1);
        IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

        /* The immediate follows the ModR/M bytes; 1 tells the effective
           address calc how many opcode bytes are still outstanding. */
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEM_MC_ASSIGN(u8Src, u8Imm);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_R);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
16219
16220
/** Opcode 0xf7 /0 - TEST Ev, Iz.
 *
 * Like the byte variant, but per operand size.  In 64-bit mode the
 * immediate is imm32 sign-extended to 64 bits.  Read-only mapping for
 * the memory operand; nothing but EFLAGS is written back. */
FNIEMOP_DEF_1(iemOp_grp3_test_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("test Ev,Iv");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *,      pu16Dst,                0);
                IEM_MC_ARG_CONST(uint16_t,  u16Src,/*=*/u16Imm,    1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *,      pu32Dst,                0);
                IEM_MC_ARG_CONST(uint32_t,  u32Src,/*=*/u32Imm,    1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);
                /* No clearing the high dword here - test doesn't write back the result. */
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *,      pu64Dst,                0);
                IEM_MC_ARG_CONST(uint64_t,  u64Src,/*=*/u64Imm,    1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory access. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *,      pu16Dst,         0);
                IEM_MC_ARG(uint16_t,        u16Src,          1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

                /* 2 = size of the trailing imm16 still to be fetched. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_ASSIGN(u16Src, u16Imm);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_R);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *,      pu32Dst,         0);
                IEM_MC_ARG(uint32_t,        u32Src,          1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_ASSIGN(u32Src, u32Imm);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_R);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *,      pu64Dst,         0);
                IEM_MC_ARG(uint64_t,        u64Src,          1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

                /* Still a 4 byte imm32 (sign-extended to 64) in the stream. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_ASSIGN(u64Src, u64Imm);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_R);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
16357
16358
/** Opcode 0xf6 /4, /5, /6 and /7 - MUL/IMUL/DIV/IDIV Eb.
 *
 * All four byte-sized multiply/divide forms share this shape: fetch the
 * operand (register or memory), run the assembly worker against AX, and
 * raise \#DE when the worker reports failure (non-zero rc, i.e. divide
 * error / overflow). */
FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEb, uint8_t, bRm, PFNIEMAIMPLMULDIVU8, pfnU8)
{
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 1);
        IEM_MC_ARG(uint16_t *,      pu16AX,     0);
        IEM_MC_ARG(uint8_t,         u8Value,    1);
        IEM_MC_ARG(uint32_t *,      pEFlags,    2);
        IEM_MC_LOCAL(int32_t,       rc);

        IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_AIMPL_3(rc, pfnU8, pu16AX, u8Value, pEFlags);
        /* rc == 0 means success; otherwise raise #DE. */
        IEM_MC_IF_LOCAL_IS_Z(rc) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_RAISE_DIVIDE_ERROR();
        } IEM_MC_ENDIF();

        IEM_MC_END();
    }
    else
    {
        /* memory access. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint16_t *,      pu16AX,     0);
        IEM_MC_ARG(uint8_t,         u8Value,    1);
        IEM_MC_ARG(uint32_t *,      pEFlags,    2);
        IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
        IEM_MC_LOCAL(int32_t,       rc);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_AIMPL_3(rc, pfnU8, pu16AX, u8Value, pEFlags);
        IEM_MC_IF_LOCAL_IS_Z(rc) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_RAISE_DIVIDE_ERROR();
        } IEM_MC_ENDIF();

        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
16413
16414
/** Opcode 0xf7 /4, /5, /6 and /7 - MUL/IMUL/DIV/IDIV Ev.
 *
 * Word/dword/qword multiply and divide against the AX:DX / EAX:EDX /
 * RAX:RDX pair.  The worker returns 0 on success; any other value makes
 * us raise \#DE.  The 32-bit register path additionally clears the high
 * dwords of RAX/RDX on success, as required by 64-bit mode semantics for
 * 32-bit GPR writes. */
FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEv, uint8_t, bRm, PCIEMOPMULDIVSIZES, pImpl)
{
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint16_t *,      pu16AX,     0);
                IEM_MC_ARG(uint16_t *,      pu16DX,     1);
                IEM_MC_ARG(uint16_t,        u16Value,   2);
                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint32_t *,      pu32AX,     0);
                IEM_MC_ARG(uint32_t *,      pu32DX,     1);
                IEM_MC_ARG(uint32_t,        u32Value,   2);
                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    /* 32-bit writes zero the upper halves of RAX/RDX. */
                    IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32AX);
                    IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32DX);
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint64_t *,      pu64AX,     0);
                IEM_MC_ARG(uint64_t *,      pu64DX,     1);
                IEM_MC_ARG(uint64_t,        u64Value,   2);
                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory access. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint16_t *,      pu16AX,     0);
                IEM_MC_ARG(uint16_t *,      pu16DX,     1);
                IEM_MC_ARG(uint16_t,        u16Value,   2);
                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint32_t *,      pu32AX,     0);
                IEM_MC_ARG(uint32_t *,      pu32DX,     1);
                IEM_MC_ARG(uint32_t,        u32Value,   2);
                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32AX);
                    IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32DX);
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint64_t *,      pu64AX,     0);
                IEM_MC_ARG(uint64_t *,      pu64DX,     1);
                IEM_MC_ARG(uint64_t,        u64Value,   2);
                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
16598
/** Opcode 0xf6 - Group 3, byte operands.
 *
 * Dispatches on the ModR/M reg field: /0 TEST, /1 invalid, /2 NOT,
 * /3 NEG, /4 MUL, /5 IMUL, /6 DIV, /7 IDIV. */
FNIEMOP_DEF(iemOp_Grp3_Eb)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            return FNIEMOP_CALL_1(iemOp_grp3_test_Eb, bRm);
        case 1:
            return IEMOP_RAISE_INVALID_OPCODE();
        case 2:
            IEMOP_MNEMONIC("not Eb");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_not);
        case 3:
            IEMOP_MNEMONIC("neg Eb");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_neg);
        case 4:
            IEMOP_MNEMONIC("mul Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_mul_u8);
        case 5:
            IEMOP_MNEMONIC("imul Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_imul_u8);
        case 6:
            IEMOP_MNEMONIC("div Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_div_u8);
        case 7:
            IEMOP_MNEMONIC("idiv Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_idiv_u8);
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
16634
16635
/** Opcode 0xf7 - Group 3, word/dword/qword operands.
 *
 * Same sub-opcode layout as 0xf6 (see iemOp_Grp3_Eb), but routed to the
 * Ev-sized common workers with size-table implementations. */
FNIEMOP_DEF(iemOp_Grp3_Ev)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            return FNIEMOP_CALL_1(iemOp_grp3_test_Ev, bRm);
        case 1:
            return IEMOP_RAISE_INVALID_OPCODE();
        case 2:
            IEMOP_MNEMONIC("not Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_not);
        case 3:
            IEMOP_MNEMONIC("neg Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_neg);
        case 4:
            IEMOP_MNEMONIC("mul Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_mul);
        case 5:
            IEMOP_MNEMONIC("imul Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_imul);
        case 6:
            IEMOP_MNEMONIC("div Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_div);
        case 7:
            IEMOP_MNEMONIC("idiv Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_idiv);
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
16671
16672
/** Opcode 0xf8 - CLC (clear carry flag). */
FNIEMOP_DEF(iemOp_clc)
{
    IEMOP_MNEMONIC("clc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_CLEAR_EFL_BIT(X86_EFL_CF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
16684
16685
/** Opcode 0xf9 - STC (set carry flag). */
FNIEMOP_DEF(iemOp_stc)
{
    IEMOP_MNEMONIC("stc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_SET_EFL_BIT(X86_EFL_CF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
16697
16698
/** Opcode 0xfa - CLI (privilege/IOPL checks done in the C worker). */
FNIEMOP_DEF(iemOp_cli)
{
    IEMOP_MNEMONIC("cli");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cli);
}
16706
16707
/** Opcode 0xfb - STI (interrupt shadow and IOPL handled in the C worker). */
FNIEMOP_DEF(iemOp_sti)
{
    IEMOP_MNEMONIC("sti");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sti);
}
16714
16715
/** Opcode 0xfc - CLD (clear direction flag). */
FNIEMOP_DEF(iemOp_cld)
{
    IEMOP_MNEMONIC("cld");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_CLEAR_EFL_BIT(X86_EFL_DF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
16727
16728
/** Opcode 0xfd - STD (set direction flag). */
FNIEMOP_DEF(iemOp_std)
{
    IEMOP_MNEMONIC("std");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_SET_EFL_BIT(X86_EFL_DF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
16740
16741
16742/** Opcode 0xfe. */
16743FNIEMOP_DEF(iemOp_Grp4)
16744{
16745 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
16746 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
16747 {
16748 case 0:
16749 IEMOP_MNEMONIC("inc Ev");
16750 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_inc);
16751 case 1:
16752 IEMOP_MNEMONIC("dec Ev");
16753 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_dec);
16754 default:
16755 IEMOP_MNEMONIC("grp4-ud");
16756 return IEMOP_RAISE_INVALID_OPCODE();
16757 }
16758}
16759
16760
16761/**
16762 * Opcode 0xff /2.
16763 * @param bRm The RM byte.
16764 */
16765FNIEMOP_DEF_1(iemOp_Grp5_calln_Ev, uint8_t, bRm)
16766{
16767 IEMOP_MNEMONIC("calln Ev");
16768 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
16769 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
16770
16771 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
16772 {
16773 /* The new RIP is taken from a register. */
16774 switch (pIemCpu->enmEffOpSize)
16775 {
16776 case IEMMODE_16BIT:
16777 IEM_MC_BEGIN(1, 0);
16778 IEM_MC_ARG(uint16_t, u16Target, 0);
16779 IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
16780 IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
16781 IEM_MC_END()
16782 return VINF_SUCCESS;
16783
16784 case IEMMODE_32BIT:
16785 IEM_MC_BEGIN(1, 0);
16786 IEM_MC_ARG(uint32_t, u32Target, 0);
16787 IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
16788 IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
16789 IEM_MC_END()
16790 return VINF_SUCCESS;
16791
16792 case IEMMODE_64BIT:
16793 IEM_MC_BEGIN(1, 0);
16794 IEM_MC_ARG(uint64_t, u64Target, 0);
16795 IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
16796 IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
16797 IEM_MC_END()
16798 return VINF_SUCCESS;
16799
16800 IEM_NOT_REACHED_DEFAULT_CASE_RET();
16801 }
16802 }
16803 else
16804 {
16805 /* The new RIP is taken from a register. */
16806 switch (pIemCpu->enmEffOpSize)
16807 {
16808 case IEMMODE_16BIT:
16809 IEM_MC_BEGIN(1, 1);
16810 IEM_MC_ARG(uint16_t, u16Target, 0);
16811 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
16812 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
16813 IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
16814 IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
16815 IEM_MC_END()
16816 return VINF_SUCCESS;
16817
16818 case IEMMODE_32BIT:
16819 IEM_MC_BEGIN(1, 1);
16820 IEM_MC_ARG(uint32_t, u32Target, 0);
16821 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
16822 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
16823 IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
16824 IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
16825 IEM_MC_END()
16826 return VINF_SUCCESS;
16827
16828 case IEMMODE_64BIT:
16829 IEM_MC_BEGIN(1, 1);
16830 IEM_MC_ARG(uint64_t, u64Target, 0);
16831 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
16832 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
16833 IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
16834 IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
16835 IEM_MC_END()
16836 return VINF_SUCCESS;
16837
16838 IEM_NOT_REACHED_DEFAULT_CASE_RET();
16839 }
16840 }
16841}
16842
16843typedef IEM_CIMPL_DECL_TYPE_3(FNIEMCIMPLFARBRANCH, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmOpSize);
16844
16845FNIEMOP_DEF_2(iemOpHlp_Grp5_far_Ep, uint8_t, bRm, FNIEMCIMPLFARBRANCH *, pfnCImpl)
16846{
16847 /* Registers? How?? */
16848 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
16849 return IEMOP_RAISE_INVALID_OPCODE(); /* callf eax is not legal */
16850
16851 /* Far pointer loaded from memory. */
16852 switch (pIemCpu->enmEffOpSize)
16853 {
16854 case IEMMODE_16BIT:
16855 IEM_MC_BEGIN(3, 1);
16856 IEM_MC_ARG(uint16_t, u16Sel, 0);
16857 IEM_MC_ARG(uint16_t, offSeg, 1);
16858 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
16859 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
16860 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
16861 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
16862 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
16863 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 2);
16864 IEM_MC_CALL_CIMPL_3(pfnCImpl, u16Sel, offSeg, enmEffOpSize);
16865 IEM_MC_END();
16866 return VINF_SUCCESS;
16867
16868 case IEMMODE_64BIT:
16869 /** @todo testcase: AMD does not seem to believe in the case (see bs-cpu-xcpt-1)
16870 * and will apparently ignore REX.W, at least for the jmp far qword [rsp]
16871 * and call far qword [rsp] encodings. */
16872 if (!IEM_IS_GUEST_CPU_AMD(pIemCpu))
16873 {
16874 IEM_MC_BEGIN(3, 1);
16875 IEM_MC_ARG(uint16_t, u16Sel, 0);
16876 IEM_MC_ARG(uint64_t, offSeg, 1);
16877 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
16878 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
16879 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
16880 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
16881 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
16882 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 8);
16883 IEM_MC_CALL_CIMPL_3(pfnCImpl, u16Sel, offSeg, enmEffOpSize);
16884 IEM_MC_END();
16885 return VINF_SUCCESS;
16886 }
16887 /* AMD falls thru. */
16888
16889 case IEMMODE_32BIT:
16890 IEM_MC_BEGIN(3, 1);
16891 IEM_MC_ARG(uint16_t, u16Sel, 0);
16892 IEM_MC_ARG(uint32_t, offSeg, 1);
16893 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_32BIT, 2);
16894 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
16895 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
16896 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
16897 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
16898 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 4);
16899 IEM_MC_CALL_CIMPL_3(pfnCImpl, u16Sel, offSeg, enmEffOpSize);
16900 IEM_MC_END();
16901 return VINF_SUCCESS;
16902
16903 IEM_NOT_REACHED_DEFAULT_CASE_RET();
16904 }
16905}
16906
16907
/**
 * Opcode 0xff /3 - CALL far indirect (callf Ep).
 *
 * Thin wrapper: all decoding and far-pointer loading is done by the common
 * far-branch worker, which dispatches to iemCImpl_callf.
 *
 * @param bRm The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_callf_Ep, uint8_t, bRm)
{
    IEMOP_MNEMONIC("callf Ep");
    return FNIEMOP_CALL_2(iemOpHlp_Grp5_far_Ep, bRm, iemCImpl_callf);
}
16917
16918
/**
 * Opcode 0xff /4 - JMP near indirect (jmpn Ev).
 *
 * Sets RIP to the target taken either from a general register (mod=3) or
 * from a memory operand.  Unlike the call variants no C implementation is
 * needed; the RIP update is done inline via IEM_MC_SET_RIP_*.
 *
 * @param bRm The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_jmpn_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("jmpn Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* near jump defaults to 64-bit operand size in long mode */

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* The new RIP is taken from a register. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Target);
                IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_SET_RIP_U16(u16Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Target);
                IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_SET_RIP_U32(u32Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Target);
                IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_SET_RIP_U64(u64Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* The new RIP is taken from a memory location. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Target);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_SET_RIP_U16(u16Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Target);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_SET_RIP_U32(u32Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Target);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_SET_RIP_U64(u64Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
17000
17001
/**
 * Opcode 0xff /5 - JMP far indirect (jmpf Ep).
 *
 * Thin wrapper: all decoding and far-pointer loading is done by the common
 * far-branch worker, which dispatches to iemCImpl_FarJmp.
 *
 * @param bRm The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_jmpf_Ep, uint8_t, bRm)
{
    IEMOP_MNEMONIC("jmpf Ep");
    return FNIEMOP_CALL_2(iemOpHlp_Grp5_far_Ep, bRm, iemCImpl_FarJmp);
}
17011
17012
/**
 * Opcode 0xff /6 - PUSH Ev.
 *
 * Register operands are forwarded to the common push-register worker; the
 * memory forms are handled inline below (load the operand, push it, advance
 * RIP).
 *
 * @param bRm The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_push_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("push Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */

    /* Registers are handled by a common worker. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return FNIEMOP_CALL_1(iemOpCommonPushGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);

    /* Memory we do here. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* push defaults to 64-bit operand size in long mode */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint16_t, u16Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
            IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U16(u16Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
            IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U32(u32Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
            IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U64(u64Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
17066
17067
/** Opcode 0xff - Group 5: dispatches on the ModRM reg field (/0../7). */
FNIEMOP_DEF(iemOp_Grp5)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            IEMOP_MNEMONIC("inc Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_inc);
        case 1:
            IEMOP_MNEMONIC("dec Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_dec);
        case 2:
            return FNIEMOP_CALL_1(iemOp_Grp5_calln_Ev, bRm);
        case 3:
            return FNIEMOP_CALL_1(iemOp_Grp5_callf_Ep, bRm);
        case 4:
            return FNIEMOP_CALL_1(iemOp_Grp5_jmpn_Ev, bRm);
        case 5:
            return FNIEMOP_CALL_1(iemOp_Grp5_jmpf_Ep, bRm);
        case 6:
            return FNIEMOP_CALL_1(iemOp_Grp5_push_Ev, bRm);
        case 7:
            /* /7 is undefined for opcode 0xff. */
            IEMOP_MNEMONIC("grp5-ud");
            return IEMOP_RAISE_INVALID_OPCODE();
    }
    /* Unreachable: all eight 3-bit reg values are handled above. */
    AssertFailedReturn(VERR_INTERNAL_ERROR_2);
}
17096
17097
17098
/**
 * The one-byte opcode decoder map, indexed by the opcode byte (0x00..0xff).
 * Four entries per row; the hex comment gives the opcode of the first entry.
 * Forward declared at the top of the file so the decoder loop can use it.
 */
const PFNIEMOP g_apfnOneByteMap[256] =
{
    /* 0x00 */ iemOp_add_Eb_Gb, iemOp_add_Ev_Gv, iemOp_add_Gb_Eb, iemOp_add_Gv_Ev,
    /* 0x04 */ iemOp_add_Al_Ib, iemOp_add_eAX_Iz, iemOp_push_ES, iemOp_pop_ES,
    /* 0x08 */ iemOp_or_Eb_Gb, iemOp_or_Ev_Gv, iemOp_or_Gb_Eb, iemOp_or_Gv_Ev,
    /* 0x0c */ iemOp_or_Al_Ib, iemOp_or_eAX_Iz, iemOp_push_CS, iemOp_2byteEscape,
    /* 0x10 */ iemOp_adc_Eb_Gb, iemOp_adc_Ev_Gv, iemOp_adc_Gb_Eb, iemOp_adc_Gv_Ev,
    /* 0x14 */ iemOp_adc_Al_Ib, iemOp_adc_eAX_Iz, iemOp_push_SS, iemOp_pop_SS,
    /* 0x18 */ iemOp_sbb_Eb_Gb, iemOp_sbb_Ev_Gv, iemOp_sbb_Gb_Eb, iemOp_sbb_Gv_Ev,
    /* 0x1c */ iemOp_sbb_Al_Ib, iemOp_sbb_eAX_Iz, iemOp_push_DS, iemOp_pop_DS,
    /* 0x20 */ iemOp_and_Eb_Gb, iemOp_and_Ev_Gv, iemOp_and_Gb_Eb, iemOp_and_Gv_Ev,
    /* 0x24 */ iemOp_and_Al_Ib, iemOp_and_eAX_Iz, iemOp_seg_ES, iemOp_daa,
    /* 0x28 */ iemOp_sub_Eb_Gb, iemOp_sub_Ev_Gv, iemOp_sub_Gb_Eb, iemOp_sub_Gv_Ev,
    /* 0x2c */ iemOp_sub_Al_Ib, iemOp_sub_eAX_Iz, iemOp_seg_CS, iemOp_das,
    /* 0x30 */ iemOp_xor_Eb_Gb, iemOp_xor_Ev_Gv, iemOp_xor_Gb_Eb, iemOp_xor_Gv_Ev,
    /* 0x34 */ iemOp_xor_Al_Ib, iemOp_xor_eAX_Iz, iemOp_seg_SS, iemOp_aaa,
    /* 0x38 */ iemOp_cmp_Eb_Gb, iemOp_cmp_Ev_Gv, iemOp_cmp_Gb_Eb, iemOp_cmp_Gv_Ev,
    /* 0x3c */ iemOp_cmp_Al_Ib, iemOp_cmp_eAX_Iz, iemOp_seg_DS, iemOp_aas,
    /* 0x40 */ iemOp_inc_eAX, iemOp_inc_eCX, iemOp_inc_eDX, iemOp_inc_eBX,
    /* 0x44 */ iemOp_inc_eSP, iemOp_inc_eBP, iemOp_inc_eSI, iemOp_inc_eDI,
    /* 0x48 */ iemOp_dec_eAX, iemOp_dec_eCX, iemOp_dec_eDX, iemOp_dec_eBX,
    /* 0x4c */ iemOp_dec_eSP, iemOp_dec_eBP, iemOp_dec_eSI, iemOp_dec_eDI,
    /* 0x50 */ iemOp_push_eAX, iemOp_push_eCX, iemOp_push_eDX, iemOp_push_eBX,
    /* 0x54 */ iemOp_push_eSP, iemOp_push_eBP, iemOp_push_eSI, iemOp_push_eDI,
    /* 0x58 */ iemOp_pop_eAX, iemOp_pop_eCX, iemOp_pop_eDX, iemOp_pop_eBX,
    /* 0x5c */ iemOp_pop_eSP, iemOp_pop_eBP, iemOp_pop_eSI, iemOp_pop_eDI,
    /* 0x60 */ iemOp_pusha, iemOp_popa, iemOp_bound_Gv_Ma, iemOp_arpl_Ew_Gw_movsx_Gv_Ev,
    /* 0x64 */ iemOp_seg_FS, iemOp_seg_GS, iemOp_op_size, iemOp_addr_size,
    /* 0x68 */ iemOp_push_Iz, iemOp_imul_Gv_Ev_Iz, iemOp_push_Ib, iemOp_imul_Gv_Ev_Ib,
    /* 0x6c */ iemOp_insb_Yb_DX, iemOp_inswd_Yv_DX, iemOp_outsb_Yb_DX, iemOp_outswd_Yv_DX,
    /* 0x70 */ iemOp_jo_Jb, iemOp_jno_Jb, iemOp_jc_Jb, iemOp_jnc_Jb,
    /* 0x74 */ iemOp_je_Jb, iemOp_jne_Jb, iemOp_jbe_Jb, iemOp_jnbe_Jb,
    /* 0x78 */ iemOp_js_Jb, iemOp_jns_Jb, iemOp_jp_Jb, iemOp_jnp_Jb,
    /* 0x7c */ iemOp_jl_Jb, iemOp_jnl_Jb, iemOp_jle_Jb, iemOp_jnle_Jb,
    /* 0x80 */ iemOp_Grp1_Eb_Ib_80, iemOp_Grp1_Ev_Iz, iemOp_Grp1_Eb_Ib_82, iemOp_Grp1_Ev_Ib,
    /* 0x84 */ iemOp_test_Eb_Gb, iemOp_test_Ev_Gv, iemOp_xchg_Eb_Gb, iemOp_xchg_Ev_Gv,
    /* 0x88 */ iemOp_mov_Eb_Gb, iemOp_mov_Ev_Gv, iemOp_mov_Gb_Eb, iemOp_mov_Gv_Ev,
    /* 0x8c */ iemOp_mov_Ev_Sw, iemOp_lea_Gv_M, iemOp_mov_Sw_Ev, iemOp_Grp1A,
    /* 0x90 */ iemOp_nop, iemOp_xchg_eCX_eAX, iemOp_xchg_eDX_eAX, iemOp_xchg_eBX_eAX,
    /* 0x94 */ iemOp_xchg_eSP_eAX, iemOp_xchg_eBP_eAX, iemOp_xchg_eSI_eAX, iemOp_xchg_eDI_eAX,
    /* 0x98 */ iemOp_cbw, iemOp_cwd, iemOp_call_Ap, iemOp_wait,
    /* 0x9c */ iemOp_pushf_Fv, iemOp_popf_Fv, iemOp_sahf, iemOp_lahf,
    /* 0xa0 */ iemOp_mov_Al_Ob, iemOp_mov_rAX_Ov, iemOp_mov_Ob_AL, iemOp_mov_Ov_rAX,
    /* 0xa4 */ iemOp_movsb_Xb_Yb, iemOp_movswd_Xv_Yv, iemOp_cmpsb_Xb_Yb, iemOp_cmpswd_Xv_Yv,
    /* 0xa8 */ iemOp_test_AL_Ib, iemOp_test_eAX_Iz, iemOp_stosb_Yb_AL, iemOp_stoswd_Yv_eAX,
    /* 0xac */ iemOp_lodsb_AL_Xb, iemOp_lodswd_eAX_Xv, iemOp_scasb_AL_Xb, iemOp_scaswd_eAX_Xv,
    /* 0xb0 */ iemOp_mov_AL_Ib, iemOp_CL_Ib, iemOp_DL_Ib, iemOp_BL_Ib,
    /* 0xb4 */ iemOp_mov_AH_Ib, iemOp_CH_Ib, iemOp_DH_Ib, iemOp_BH_Ib,
    /* 0xb8 */ iemOp_eAX_Iv, iemOp_eCX_Iv, iemOp_eDX_Iv, iemOp_eBX_Iv,
    /* 0xbc */ iemOp_eSP_Iv, iemOp_eBP_Iv, iemOp_eSI_Iv, iemOp_eDI_Iv,
    /* 0xc0 */ iemOp_Grp2_Eb_Ib, iemOp_Grp2_Ev_Ib, iemOp_retn_Iw, iemOp_retn,
    /* 0xc4 */ iemOp_les_Gv_Mp, iemOp_lds_Gv_Mp, iemOp_Grp11_Eb_Ib, iemOp_Grp11_Ev_Iz,
    /* 0xc8 */ iemOp_enter_Iw_Ib, iemOp_leave, iemOp_retf_Iw, iemOp_retf,
    /* 0xcc */ iemOp_int_3, iemOp_int_Ib, iemOp_into, iemOp_iret,
    /* 0xd0 */ iemOp_Grp2_Eb_1, iemOp_Grp2_Ev_1, iemOp_Grp2_Eb_CL, iemOp_Grp2_Ev_CL,
    /* 0xd4 */ iemOp_aam_Ib, iemOp_aad_Ib, iemOp_Invalid, iemOp_xlat,
    /* 0xd8 */ iemOp_EscF0, iemOp_EscF1, iemOp_EscF2, iemOp_EscF3,
    /* 0xdc */ iemOp_EscF4, iemOp_EscF5, iemOp_EscF6, iemOp_EscF7,
    /* 0xe0 */ iemOp_loopne_Jb, iemOp_loope_Jb, iemOp_loop_Jb, iemOp_jecxz_Jb,
    /* 0xe4 */ iemOp_in_AL_Ib, iemOp_in_eAX_Ib, iemOp_out_Ib_AL, iemOp_out_Ib_eAX,
    /* 0xe8 */ iemOp_call_Jv, iemOp_jmp_Jv, iemOp_jmp_Ap, iemOp_jmp_Jb,
    /* 0xec */ iemOp_in_AL_DX, iemOp_eAX_DX, iemOp_out_DX_AL, iemOp_out_DX_eAX,
    /* 0xf0 */ iemOp_lock, iemOp_Invalid, iemOp_repne, iemOp_repe, /** @todo 0xf1 is INT1 / ICEBP. */
    /* 0xf4 */ iemOp_hlt, iemOp_cmc, iemOp_Grp3_Eb, iemOp_Grp3_Ev,
    /* 0xf8 */ iemOp_clc, iemOp_stc, iemOp_cli, iemOp_sti,
    /* 0xfc */ iemOp_cld, iemOp_std, iemOp_Grp4, iemOp_Grp5,
};
17166
17167
17168/** @} */
17169
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette