VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h@ 49483

Last change on this file since 49483 was 49482, checked in by vboxsync, 11 years ago

VMM: Warnings.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 586.1 KB
Line 
1/* $Id: IEMAllInstructions.cpp.h 49482 2013-11-14 15:43:58Z vboxsync $ */
2/** @file
3 * IEM - Instruction Decoding and Emulation.
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Global Variables *
21*******************************************************************************/
22extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
23
24
/**
 * Common worker for instructions like ADD, AND, OR, ++ with a byte
 * memory/register as the destination.
 *
 * Decodes the ModR/M byte and emits the register-destination or
 * memory-destination microcode form accordingly.
 *
 * @param   pImpl   Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_r8, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* LOCK is only accepted on the memory-destination form below. */
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        /* Source = ModRM.reg (REX.R extended); destination = ModRM.rm (REX.B extended). */
        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         * Note! We're putting the eflags on the stack here so we can commit them
         *       after the memory.
         */
        /* CMP and TEST have no locked variants (pfnLockedU8 is NULL) and only
           read the destination, so map it read-only for those. */
        uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R; /* CMP,TEST */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,  pu8Dst,           0);
        IEM_MC_ARG(uint8_t,    u8Src,            1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_EFLAGS(EFlags);
        /* Dispatch to the locked implementation when a LOCK prefix was decoded. */
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
85
86
/**
 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with
 * memory/register as the destination.
 *
 * Decodes the ModR/M byte and switches on the effective operand size
 * (16/32/64-bit) for both the register and memory destination forms.
 *
 * @param   pImpl   Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* LOCK is only accepted with a memory destination. */
        IEMOP_HLP_NO_LOCK_PREFIX();

        /* NOTE(review): unlike iemOpHlpBinaryOperator_rAX_Iz, this switch has no
           IEM_NOT_REACHED_DEFAULT_CASE_RET() - confirm silent fall-through on a
           bogus enmEffOpSize is intentional. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t,   u16Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t,   u32Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                /* TEST doesn't modify the destination, so skip the high-dword
                   clearing that otherwise follows a 32-bit GPR write. */
                if (pImpl != &g_iemAImpl_test)
                    IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t,   u64Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're accessing memory.
         * Note! We're putting the eflags on the stack here so we can commit them
         *       after the memory.
         */
        /* pfnLockedU8 == NULL identifies CMP/TEST (no locked variants for any
           width), which only read the destination -> read-only mapping. */
        uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R /* CMP,TEST */;
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst,          0);
                IEM_MC_ARG(uint16_t,   u16Src,           1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst,          0);
                IEM_MC_ARG(uint32_t,   u32Src,           1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst,          0);
                IEM_MC_ARG(uint64_t,   u64Src,           1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
233
234
/**
 * Common worker for byte instructions like ADD, AND, OR, ++ with a register as
 * the destination.
 *
 * The source is ModRM.rm (register or memory), so LOCK is never valid here.
 *
 * @param   pImpl   Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_r8_rm, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        /* Source = ModRM.rm; destination = ModRM.reg (mirror of the rm_r8 worker). */
        IEM_MC_FETCH_GREG_U8(u8Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         */
        /* Memory is only read here, so no mapping/commit dance is needed. */
        IEM_MC_BEGIN(3, 1);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_MEM_U8(u8Src, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
286
287
/**
 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with a
 * register as the destination.
 *
 * The source is ModRM.rm (register or memory); memory is only read, so no
 * mapping or locked variants are involved.
 *
 * @param   pImpl   Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t,   u16Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U16(u16Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t,   u32Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U32(u32Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                /* 32-bit GPR write: clear the upper half of the 64-bit register.
                   (Unconditional here, unlike rm_rv - these encodings always
                   write the destination.) */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t,   u64Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U64(u64Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're accessing memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t,   u16Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t,   u32Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t,   u64Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
415
416
/**
 * Common worker for instructions like ADD, AND, OR, ++ with working on AL with
 * a byte immediate.
 *
 * @param   pImpl   Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_AL_Ib, PCIEMOPBINSIZES, pImpl)
{
    /* Fetch the immediate operand byte before finishing decode checks. */
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG(uint8_t *,       pu8Dst,             0);
    IEM_MC_ARG_CONST(uint8_t,   u8Src,/*=*/ u8Imm,  1);
    IEM_MC_ARG(uint32_t *,      pEFlags,            2);

    /* Destination is fixed: the AL register. */
    IEM_MC_REF_GREG_U8(pu8Dst, X86_GREG_xAX);
    IEM_MC_REF_EFLAGS(pEFlags);
    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
441
442
/**
 * Common worker for instructions like ADD, AND, OR, ++ with working on
 * AX/EAX/RAX with a word/dword immediate.
 *
 * In 64-bit operand size the immediate is a sign-extended dword (no 64-bit
 * immediate form exists for these encodings).
 *
 * @param   pImpl   Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rAX_Iz, PCIEMOPBINSIZES, pImpl)
{
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint16_t *,      pu16Dst,                0);
            IEM_MC_ARG_CONST(uint16_t,  u16Src,/*=*/ u16Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint32_t *,      pu32Dst,                0);
            IEM_MC_ARG_CONST(uint32_t,  u32Src,/*=*/ u32Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

            /* TEST doesn't write EAX, so don't clear the upper half of RAX. */
            if (pImpl != &g_iemAImpl_test)
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            /* Sign-extend the 32-bit immediate to 64 bits. */
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint64_t *,      pu64Dst,                0);
            IEM_MC_ARG_CONST(uint64_t,  u64Src,/*=*/ u64Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U64(pu64Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
515
516
/** Opcodes 0xf1, 0xd6.
 *  Handler for opcodes that always decode as invalid (raises invalid-opcode). */
FNIEMOP_DEF(iemOp_Invalid)
{
    IEMOP_MNEMONIC("Invalid");
    return IEMOP_RAISE_INVALID_OPCODE();
}
523
524
525
526/** @name ..... opcodes.
527 *
528 * @{
529 */
530
531/** @} */
532
533
534/** @name Two byte opcodes (first byte 0x0f).
535 *
536 * @{
537 */
538
/** Opcode 0x0f 0x00 /0. SLDT - store the LDTR selector to a register or memory. */
FNIEMOP_DEF_1(iemOp_Grp6_sldt, uint8_t, bRm)
{
    IEMOP_MNEMONIC("sldt Rv/Mw");
    IEMOP_HLP_NO_REAL_OR_V86_MODE(); /* invalid outside protected mode */

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: width follows the effective operand size. */
        IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Ldtr);
                IEM_MC_FETCH_LDTR_U16(u16Ldtr);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Ldtr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Ldtr);
                IEM_MC_FETCH_LDTR_U32(u32Ldtr);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Ldtr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Ldtr);
                IEM_MC_FETCH_LDTR_U64(u64Ldtr);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Ldtr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory destination: always a 16-bit store, regardless of operand size. */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Ldtr);
        IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
        IEM_MC_FETCH_LDTR_U16(u16Ldtr);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Ldtr);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
594
595
/** Opcode 0x0f 0x00 /1. STR - store the task register selector (mirrors sldt). */
FNIEMOP_DEF_1(iemOp_Grp6_str, uint8_t, bRm)
{
    IEMOP_MNEMONIC("str Rv/Mw");
    IEMOP_HLP_NO_REAL_OR_V86_MODE(); /* invalid outside protected mode */

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: width follows the effective operand size. */
        IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Tr);
                IEM_MC_FETCH_TR_U16(u16Tr);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Tr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Tr);
                IEM_MC_FETCH_TR_U32(u32Tr);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tr);
                IEM_MC_FETCH_TR_U64(u64Tr);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory destination: always a 16-bit store, regardless of operand size. */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Tr);
        IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
        IEM_MC_FETCH_TR_U16(u16Tr);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tr);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
651
652
/** Opcode 0x0f 0x00 /2. LLDT - load the LDTR; heavy lifting done in iemCImpl_lldt. */
FNIEMOP_DEF_1(iemOp_Grp6_lldt, uint8_t, bRm)
{
    IEMOP_MNEMONIC("lldt Ew");
    IEMOP_HLP_NO_REAL_OR_V86_MODE(); /* invalid outside protected mode */

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register operand: selector comes straight from ModRM.rm.
           NOTE(review): no explicit CPL check here, unlike the memory path -
           presumably iemCImpl_lldt does it; confirm. */
        IEMOP_HLP_DECODED_NL_1(OP_LLDT, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS);
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
    }
    else
    {
        /* Memory operand: check CPL before touching memory. */
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
        IEMOP_HLP_DECODED_NL_1(OP_LLDT, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS);
        IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); /** @todo test order */
        IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
682
683
/** Opcode 0x0f 0x00 /3. LTR - load the task register; heavy lifting in iemCImpl_ltr. */
FNIEMOP_DEF_1(iemOp_Grp6_ltr, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ltr Ew");
    IEMOP_HLP_NO_REAL_OR_V86_MODE(); /* invalid outside protected mode */

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
        IEM_MC_END();
    }
    else
    {
        /* Memory operand: check CPL before touching memory. */
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); /** @todo test order */
        IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
713
714
715/** Opcode 0x0f 0x00 /3. */
716FNIEMOP_DEF_2(iemOpCommonGrp6VerX, uint8_t, bRm, bool, fWrite)
717{
718 IEMOP_HLP_NO_REAL_OR_V86_MODE();
719
720 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
721 {
722 IEMOP_HLP_DECODED_NL_1(fWrite ? OP_VERW : OP_VERR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
723 IEM_MC_BEGIN(2, 0);
724 IEM_MC_ARG(uint16_t, u16Sel, 0);
725 IEM_MC_ARG_CONST(bool, fWriteArg, fWrite, 1);
726 IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
727 IEM_MC_CALL_CIMPL_2(iemCImpl_VerX, u16Sel, fWriteArg);
728 IEM_MC_END();
729 }
730 else
731 {
732 IEM_MC_BEGIN(2, 1);
733 IEM_MC_ARG(uint16_t, u16Sel, 0);
734 IEM_MC_ARG_CONST(bool, fWriteArg, fWrite, 1);
735 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
736 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
737 IEMOP_HLP_DECODED_NL_1(fWrite ? OP_VERW : OP_VERR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
738 IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
739 IEM_MC_CALL_CIMPL_2(iemCImpl_VerX, u16Sel, fWriteArg);
740 IEM_MC_END();
741 }
742 return VINF_SUCCESS;
743}
744
745
/** Opcode 0x0f 0x00 /4. VERR - verify segment for reading. */
FNIEMOP_DEF_1(iemOp_Grp6_verr, uint8_t, bRm)
{
    IEMOP_MNEMONIC("verr Ew");
    /* Shared worker; false = read check. */
    return FNIEMOP_CALL_2(iemOpCommonGrp6VerX, bRm, false);
}
752
753
754/** Opcode 0x0f 0x00 /5. */
755FNIEMOP_DEF_1(iemOp_Grp6_verw, uint8_t, bRm)
756{
757 IEMOP_MNEMONIC("verr Ew");
758 return FNIEMOP_CALL_2(iemOpCommonGrp6VerX, bRm, true);
759}
760
761
/** Opcode 0x0f 0x00. Group 6 - dispatch on the ModRM.reg field. */
FNIEMOP_DEF(iemOp_Grp6)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: return FNIEMOP_CALL_1(iemOp_Grp6_sldt, bRm);
        case 1: return FNIEMOP_CALL_1(iemOp_Grp6_str,  bRm);
        case 2: return FNIEMOP_CALL_1(iemOp_Grp6_lldt, bRm);
        case 3: return FNIEMOP_CALL_1(iemOp_Grp6_ltr,  bRm);
        case 4: return FNIEMOP_CALL_1(iemOp_Grp6_verr, bRm);
        case 5: return FNIEMOP_CALL_1(iemOp_Grp6_verw, bRm);
        /* /6 and /7 are undefined in group 6. */
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        case 7: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }

}
780
781
/** Opcode 0x0f 0x01 /0. SGDT - store the GDTR to memory (iemCImpl_sgdt). */
FNIEMOP_DEF_1(iemOp_Grp7_sgdt, uint8_t, bRm)
{
    IEMOP_MNEMONIC("sgdt Ms");
    IEMOP_HLP_64BIT_OP_SIZE(); /* forces 64-bit operand size in long mode */
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t,   iEffSeg, /*=*/pIemCpu->iEffSeg,                 0);
    IEM_MC_ARG(RTGCPTR,         GCPtrEffSrc,                                    1);
    IEM_MC_ARG_CONST(IEMMODE,   enmEffOpSizeArg,/*=*/pIemCpu->enmEffOpSize,     2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_CALL_CIMPL_3(iemCImpl_sgdt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}
797
798
/** Opcode 0x0f 0x01 /0 (modrm 0xc1). VMCALL - unimplemented stub, decodes to \#UD. */
FNIEMOP_DEF(iemOp_Grp7_vmcall)
{
    IEMOP_BITCH_ABOUT_STUB();
    return IEMOP_RAISE_INVALID_OPCODE();
}
805
806
/** Opcode 0x0f 0x01 /0 (modrm 0xc2). VMLAUNCH - unimplemented stub, decodes to \#UD. */
FNIEMOP_DEF(iemOp_Grp7_vmlaunch)
{
    IEMOP_BITCH_ABOUT_STUB();
    return IEMOP_RAISE_INVALID_OPCODE();
}
813
814
/** Opcode 0x0f 0x01 /0 (modrm 0xc3). VMRESUME - unimplemented stub, decodes to \#UD. */
FNIEMOP_DEF(iemOp_Grp7_vmresume)
{
    IEMOP_BITCH_ABOUT_STUB();
    return IEMOP_RAISE_INVALID_OPCODE();
}
821
822
/** Opcode 0x0f 0x01 /0 (modrm 0xc4). VMXOFF - unimplemented stub, decodes to \#UD. */
FNIEMOP_DEF(iemOp_Grp7_vmxoff)
{
    IEMOP_BITCH_ABOUT_STUB();
    return IEMOP_RAISE_INVALID_OPCODE();
}
829
830
/** Opcode 0x0f 0x01 /1. SIDT - store the IDTR to memory (mirrors sgdt). */
FNIEMOP_DEF_1(iemOp_Grp7_sidt, uint8_t, bRm)
{
    IEMOP_MNEMONIC("sidt Ms");
    IEMOP_HLP_64BIT_OP_SIZE(); /* forces 64-bit operand size in long mode */
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t,   iEffSeg, /*=*/pIemCpu->iEffSeg,                 0);
    IEM_MC_ARG(RTGCPTR,         GCPtrEffSrc,                                    1);
    IEM_MC_ARG_CONST(IEMMODE,   enmEffOpSizeArg,/*=*/pIemCpu->enmEffOpSize,     2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_CALL_CIMPL_3(iemCImpl_sidt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}
846
847
/** Opcode 0x0f 0x01 /1 (modrm 0xc8). MONITOR - deferred to iemCImpl_monitor. */
FNIEMOP_DEF(iemOp_Grp7_monitor)
{
    IEMOP_MNEMONIC("monitor");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); /** @todo Verify that monitor is allergic to lock prefixes. */
    /* The effective segment matters for the monitored address (DS-relative RAX). */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_monitor, pIemCpu->iEffSeg);
}
855
856
/** Opcode 0x0f 0x01 /1 (modrm 0xc9). MWAIT - deferred to iemCImpl_mwait. */
FNIEMOP_DEF(iemOp_Grp7_mwait)
{
    IEMOP_MNEMONIC("mwait"); /** @todo Verify that mwait is allergic to lock prefixes. */
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_mwait);
}
864
865
/** Opcode 0x0f 0x01 /2. LGDT - load the GDTR from memory (iemCImpl_lgdt). */
FNIEMOP_DEF_1(iemOp_Grp7_lgdt, uint8_t, bRm)
{
    IEMOP_MNEMONIC("lgdt");
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEMOP_HLP_64BIT_OP_SIZE(); /* forces 64-bit operand size in long mode */
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t,   iEffSeg, /*=*/pIemCpu->iEffSeg,                 0);
    IEM_MC_ARG(RTGCPTR,         GCPtrEffSrc,                                    1);
    IEM_MC_ARG_CONST(IEMMODE,   enmEffOpSizeArg,/*=*/pIemCpu->enmEffOpSize,     2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEM_MC_CALL_CIMPL_3(iemCImpl_lgdt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}
882
883
/** Opcode 0x0f 0x01 /2 (modrm 0xd0). XGETBV - unimplemented; asserts in
 *  debug builds, then decodes to \#UD. */
FNIEMOP_DEF(iemOp_Grp7_xgetbv)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}
890
891
/** Opcode 0x0f 0x01 /2 (modrm 0xd1). XSETBV - unimplemented; asserts in
 *  debug builds, then decodes to \#UD. */
FNIEMOP_DEF(iemOp_Grp7_xsetbv)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}
898
899
/** Opcode 0x0f 0x01 /3. LIDT - load the IDTR from memory (iemCImpl_lidt).
 *  NOTE(review): no IEMOP_MNEMONIC() here, unlike the sibling lgdt - confirm
 *  this omission is intentional. */
FNIEMOP_DEF_1(iemOp_Grp7_lidt, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /* Long mode forces a 64-bit operand size regardless of prefixes
       (open-coded equivalent of IEMOP_HLP_64BIT_OP_SIZE in lgdt/sgdt). */
    IEMMODE enmEffOpSize = pIemCpu->enmCpuMode == IEMMODE_64BIT
                         ? IEMMODE_64BIT
                         : pIemCpu->enmEffOpSize;
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t,   iEffSeg, /*=*/pIemCpu->iEffSeg,                 0);
    IEM_MC_ARG(RTGCPTR,         GCPtrEffSrc,                                    1);
    IEM_MC_ARG_CONST(IEMMODE,   enmEffOpSizeArg,/*=*/enmEffOpSize,              2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEM_MC_CALL_CIMPL_3(iemCImpl_lidt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}
917
918
/* AMD SVM encodings of 0x0f 0x01 with mod=3 - all unimplemented stubs; the
   FNIEMOP_UD_STUB macro presumably emits a handler raising \#UD (confirm
   against the macro definition). */

/** Opcode 0x0f 0x01 0xd8. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmrun);

/** Opcode 0x0f 0x01 0xd9. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmmcall);

/** Opcode 0x0f 0x01 0xda. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmload);

/** Opcode 0x0f 0x01 0xdb. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmsave);

/** Opcode 0x0f 0x01 0xdc. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_stgi);

/** Opcode 0x0f 0x01 0xdd. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_clgi);

/** Opcode 0x0f 0x01 0xde. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_skinit);

/** Opcode 0x0f 0x01 0xdf. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_invlpga);
942
/** Opcode 0x0f 0x01 /4. SMSW - store the machine status word (CR0 low bits). */
FNIEMOP_DEF_1(iemOp_Grp7_smsw, uint8_t, bRm)
{
    IEMOP_MNEMONIC("smsw");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: store CR0 at the effective operand size. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Tmp);
                IEM_MC_FETCH_CR0_U16(u16Tmp);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Tmp);
                IEM_MC_FETCH_CR0_U32(u32Tmp);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_FETCH_CR0_U64(u64Tmp);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Ignore operand size here, memory refs are always 16-bit. */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Tmp);
        IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_CR0_U16(u16Tmp);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tmp);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
        return VINF_SUCCESS;
    }
}
996
997
/** Opcode 0x0f 0x01 /6. LMSW - load the machine status word (iemCImpl_lmsw). */
FNIEMOP_DEF_1(iemOp_Grp7_lmsw, uint8_t, bRm)
{
    /* The operand size is effectively ignored, all is 16-bit and only the
       lower 3-bits are used. */
    IEMOP_MNEMONIC("lmsw");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register operand. */
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Tmp, 0);
        IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
        IEM_MC_END();
    }
    else
    {
        /* Memory operand: read a 16-bit word. */
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Tmp, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1025
1026
/** Opcode 0x0f 0x01 /7 (mem form only; the caller dispatches mod=3 to
 *  swapgs/rdtscp, so this is only reached with a memory operand). */
FNIEMOP_DEF_1(iemOp_Grp7_invlpg, uint8_t, bRm)
{
    IEMOP_MNEMONIC("invlpg");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(1, 1);
    IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 0);
    /* Only the effective address matters; no memory access is performed here. */
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEM_MC_CALL_CIMPL_1(iemCImpl_invlpg, GCPtrEffDst);
    IEM_MC_END();
    return VINF_SUCCESS;
}
1039
1040
/** Opcode 0x0f 0x01 /7, mod=3, rm=0 (0x0f 0x01 0xf8). */
FNIEMOP_DEF(iemOp_Grp7_swapgs)
{
    IEMOP_MNEMONIC("swapgs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_ONLY_64BIT();     /* SWAPGS is only valid in 64-bit mode. */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_swapgs);
}
1049
1050
/** Opcode 0x0f 0x01 /7, mod=3, rm=1 (0x0f 0x01 0xf9). Not yet implemented. */
FNIEMOP_DEF(iemOp_Grp7_rdtscp)
{
    NOREF(pIemCpu);
    IEMOP_BITCH_ABOUT_STUB();
    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
}
1058
1059
/** Opcode 0x0f 0x01 (group 7).
 *
 * Dispatches on the reg field of ModR/M; for several /r values the register
 * form (mod=3) encodes a different instruction selected by the rm field. */
FNIEMOP_DEF(iemOp_Grp7)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: /* SGDT (mem) / VMX instructions (reg). */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_sgdt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 1: return FNIEMOP_CALL(iemOp_Grp7_vmcall);
                case 2: return FNIEMOP_CALL(iemOp_Grp7_vmlaunch);
                case 3: return FNIEMOP_CALL(iemOp_Grp7_vmresume);
                case 4: return FNIEMOP_CALL(iemOp_Grp7_vmxoff);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 1: /* SIDT (mem) / MONITOR+MWAIT (reg). */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_sidt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_monitor);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_mwait);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 2: /* LGDT (mem) / XGETBV+XSETBV (reg). */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_lgdt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_xgetbv);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_xsetbv);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 3: /* LIDT (mem) / AMD SVM instructions (reg). */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_lidt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_Amd_vmrun);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_Amd_vmmcall);
                case 2: return FNIEMOP_CALL(iemOp_Grp7_Amd_vmload);
                case 3: return FNIEMOP_CALL(iemOp_Grp7_Amd_vmsave);
                case 4: return FNIEMOP_CALL(iemOp_Grp7_Amd_stgi);
                case 5: return FNIEMOP_CALL(iemOp_Grp7_Amd_clgi);
                case 6: return FNIEMOP_CALL(iemOp_Grp7_Amd_skinit);
                case 7: return FNIEMOP_CALL(iemOp_Grp7_Amd_invlpga);
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }

        case 4: /* SMSW - both reg and mem forms. */
            return FNIEMOP_CALL_1(iemOp_Grp7_smsw, bRm);

        case 5: /* Unassigned. */
            return IEMOP_RAISE_INVALID_OPCODE();

        case 6: /* LMSW - both reg and mem forms. */
            return FNIEMOP_CALL_1(iemOp_Grp7_lmsw, bRm);

        case 7: /* INVLPG (mem) / SWAPGS+RDTSCP (reg). */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_invlpg, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_swapgs);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_rdtscp);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
1136
/** Common worker for LAR (0x0f 0x02) and LSL (0x0f 0x03).
 *
 * @param   fIsLar  true for LAR, false for LSL; forwarded as a constant
 *                  argument to the C implementation.
 *
 * Note: the 32-bit and 64-bit operand sizes share the 64-bit code path; the
 * C implementation handles the width difference. The selector read from
 * memory is always 16 bits. */
FNIEMOP_DEF_1(iemOpCommonLarLsl_Gv_Ew, bool, fIsLar)
{
    IEMOP_HLP_NO_REAL_OR_V86_MODE();    /* LAR/LSL raise #UD outside protected mode. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_REG, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Sel, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3);

                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u16, pu16Dst, u16Sel, pEFlags, fIsLarArg);

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint16_t, u16Sel, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3);

                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u64, pu64Dst, u16Sel, pEFlags, fIsLarArg);

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Sel, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_MEM, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);

                IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u16, pu16Dst, u16Sel, pEFlags, fIsLarArg);

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint16_t, u16Sel, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_MEM, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
/** @todo testcase: make sure it's a 16-bit read. */

                IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u64, pu64Dst, u16Sel, pEFlags, fIsLarArg);

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
1238
1239
1240
/** Opcode 0x0f 0x02 - LAR Gv,Ew. */
FNIEMOP_DEF(iemOp_lar_Gv_Ew)
{
    IEMOP_MNEMONIC("lar Gv,Ew");
    return FNIEMOP_CALL_1(iemOpCommonLarLsl_Gv_Ew, true);
}
1247
1248
/** Opcode 0x0f 0x03 - LSL Gv,Ew. */
FNIEMOP_DEF(iemOp_lsl_Gv_Ew)
{
    IEMOP_MNEMONIC("lsl Gv,Ew");
    return FNIEMOP_CALL_1(iemOpCommonLarLsl_Gv_Ew, false);
}
1255
1256
/** Opcode 0x0f 0x05 - SYSCALL. (Comment previously said 0x04, which is an
 *  undefined opcode; SYSCALL is 0F 05 per the Intel/AMD opcode maps.) */
FNIEMOP_DEF(iemOp_syscall)
{
    IEMOP_MNEMONIC("syscall");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_syscall);
}
1264
1265
/** Opcode 0x0f 0x06 - CLTS. (Comment previously said 0x05, which is SYSCALL.) */
FNIEMOP_DEF(iemOp_clts)
{
    IEMOP_MNEMONIC("clts");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clts);
}
1273
1274
/** Opcode 0x0f 0x07 - SYSRET. (Comment previously said 0x06, which is CLTS.) */
FNIEMOP_DEF(iemOp_sysret)
{
    IEMOP_MNEMONIC("sysret");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sysret);
}
1282
1283
/** Opcode 0x0f 0x08 - INVD. Not yet implemented. */
FNIEMOP_STUB(iemOp_invd);
1286
1287
/** Opcode 0x0f 0x09 - WBINVD.
 *  Implemented as a privileged no-op: only the CPL check is performed, the
 *  actual cache write-back/invalidate is not emulated. */
FNIEMOP_DEF(iemOp_wbinvd)
{
    IEMOP_MNEMONIC("wbinvd");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS; /* ignore for now */
}
1299
1300
/** Opcode 0x0f 0x0b - UD2. Not yet implemented. */
FNIEMOP_STUB(iemOp_ud2);
1303
/** Opcode 0x0f 0x0d - AMD Group P (PREFETCH/PREFETCHW).
 *  Raises \#UD unless the CPU advertises 3DNow!/long mode/3DNOWPRF; Intel
 *  implements this as NOP Ev and so does this emulation. */
FNIEMOP_DEF(iemOp_nop_Ev_GrpP)
{
    /* AMD prefetch group, Intel implements this as NOP Ev (and so do we). */
    if (!IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(X86_CPUID_EXT_FEATURE_EDX_LONG_MODE | X86_CPUID_AMD_FEATURE_EDX_3DNOW,
                                               X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF))
    {
        IEMOP_MNEMONIC("GrpP");
        return IEMOP_RAISE_INVALID_OPCODE();
    }

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* Register forms are invalid; only memory operands are defined. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_MNEMONIC("GrpP");
        return IEMOP_RAISE_INVALID_OPCODE();
    }

    IEMOP_HLP_NO_LOCK_PREFIX();
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 2: /* Aliased to /0 for the time being. */
        case 4: /* Aliased to /0 for the time being. */
        case 5: /* Aliased to /0 for the time being. */
        case 6: /* Aliased to /0 for the time being. */
        case 7: /* Aliased to /0 for the time being. */
        case 0: IEMOP_MNEMONIC("prefetch"); break;
        case 1: IEMOP_MNEMONIC("prefetchw "); break; /* NOTE(review): trailing space looks accidental - confirm before changing the stats key. */
        case 3: IEMOP_MNEMONIC("prefetchw"); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }

    /* Decode the effective address for fault behaviour, then treat as NOP. */
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    /* Currently a NOP. */
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
1344
1345
/** Opcode 0x0f 0x0e - FEMMS (3DNow!). Not yet implemented. */
FNIEMOP_STUB(iemOp_femms);


/* 3DNow! instructions, encoded as 0x0f 0x0f /r imm8 where the trailing
   immediate selects the operation. All currently unimplemented stubs;
   dispatched from iemOp_3Dnow below. */

/** Opcode 0x0f 0x0f 0x0c. */
FNIEMOP_STUB(iemOp_3Dnow_pi2fw_Pq_Qq);

/** Opcode 0x0f 0x0f 0x0d. */
FNIEMOP_STUB(iemOp_3Dnow_pi2fd_Pq_Qq);

/** Opcode 0x0f 0x0f 0x1c. */
FNIEMOP_STUB(iemOp_3Dnow_pf2fw_Pq_Qq);

/** Opcode 0x0f 0x0f 0x1d. */
FNIEMOP_STUB(iemOp_3Dnow_pf2fd_Pq_Qq);

/** Opcode 0x0f 0x0f 0x8a. */
FNIEMOP_STUB(iemOp_3Dnow_pfnacc_Pq_Qq);

/** Opcode 0x0f 0x0f 0x8e. */
FNIEMOP_STUB(iemOp_3Dnow_pfpnacc_Pq_Qq);

/** Opcode 0x0f 0x0f 0x90. */
FNIEMOP_STUB(iemOp_3Dnow_pfcmpge_Pq_Qq);

/** Opcode 0x0f 0x0f 0x94. */
FNIEMOP_STUB(iemOp_3Dnow_pfmin_Pq_Qq);

/** Opcode 0x0f 0x0f 0x96. */
FNIEMOP_STUB(iemOp_3Dnow_pfrcp_Pq_Qq);

/** Opcode 0x0f 0x0f 0x97. */
FNIEMOP_STUB(iemOp_3Dnow_pfrsqrt_Pq_Qq);

/** Opcode 0x0f 0x0f 0x9a. */
FNIEMOP_STUB(iemOp_3Dnow_pfsub_Pq_Qq);

/** Opcode 0x0f 0x0f 0x9e. */
FNIEMOP_STUB(iemOp_3Dnow_pfadd_PQ_Qq);

/** Opcode 0x0f 0x0f 0xa0. */
FNIEMOP_STUB(iemOp_3Dnow_pfcmpgt_Pq_Qq);

/** Opcode 0x0f 0x0f 0xa4. */
FNIEMOP_STUB(iemOp_3Dnow_pfmax_Pq_Qq);

/** Opcode 0x0f 0x0f 0xa6. */
FNIEMOP_STUB(iemOp_3Dnow_pfrcpit1_Pq_Qq);

/** Opcode 0x0f 0x0f 0xa7. */
FNIEMOP_STUB(iemOp_3Dnow_pfrsqit1_Pq_Qq);

/** Opcode 0x0f 0x0f 0xaa. */
FNIEMOP_STUB(iemOp_3Dnow_pfsubr_Pq_Qq);

/** Opcode 0x0f 0x0f 0xae. */
FNIEMOP_STUB(iemOp_3Dnow_pfacc_PQ_Qq);

/** Opcode 0x0f 0x0f 0xb0. */
FNIEMOP_STUB(iemOp_3Dnow_pfcmpeq_Pq_Qq);

/** Opcode 0x0f 0x0f 0xb4. */
FNIEMOP_STUB(iemOp_3Dnow_pfmul_Pq_Qq);

/** Opcode 0x0f 0x0f 0xb6. */
FNIEMOP_STUB(iemOp_3Dnow_pfrcpit2_Pq_Qq);

/** Opcode 0x0f 0x0f 0xb7. */
FNIEMOP_STUB(iemOp_3Dnow_pmulhrw_Pq_Qq);

/** Opcode 0x0f 0x0f 0xbb. */
FNIEMOP_STUB(iemOp_3Dnow_pswapd_Pq_Qq);

/** Opcode 0x0f 0x0f 0xbf. */
FNIEMOP_STUB(iemOp_3Dnow_pavgusb_PQ_Qq);
1421
1422
1423/** Opcode 0x0f 0x0f. */
1424FNIEMOP_DEF(iemOp_3Dnow)
1425{
1426 if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_AMD_FEATURE_EDX_3DNOW))
1427 {
1428 IEMOP_MNEMONIC("3Dnow");
1429 return IEMOP_RAISE_INVALID_OPCODE();
1430 }
1431
1432 /* This is pretty sparse, use switch instead of table. */
1433 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
1434 switch (b)
1435 {
1436 case 0x0c: return FNIEMOP_CALL(iemOp_3Dnow_pi2fw_Pq_Qq);
1437 case 0x0d: return FNIEMOP_CALL(iemOp_3Dnow_pi2fd_Pq_Qq);
1438 case 0x1c: return FNIEMOP_CALL(iemOp_3Dnow_pf2fw_Pq_Qq);
1439 case 0x1d: return FNIEMOP_CALL(iemOp_3Dnow_pf2fd_Pq_Qq);
1440 case 0x8a: return FNIEMOP_CALL(iemOp_3Dnow_pfnacc_Pq_Qq);
1441 case 0x8e: return FNIEMOP_CALL(iemOp_3Dnow_pfpnacc_Pq_Qq);
1442 case 0x90: return FNIEMOP_CALL(iemOp_3Dnow_pfcmpge_Pq_Qq);
1443 case 0x94: return FNIEMOP_CALL(iemOp_3Dnow_pfmin_Pq_Qq);
1444 case 0x96: return FNIEMOP_CALL(iemOp_3Dnow_pfrcp_Pq_Qq);
1445 case 0x97: return FNIEMOP_CALL(iemOp_3Dnow_pfrsqrt_Pq_Qq);
1446 case 0x9a: return FNIEMOP_CALL(iemOp_3Dnow_pfsub_Pq_Qq);
1447 case 0x9e: return FNIEMOP_CALL(iemOp_3Dnow_pfadd_PQ_Qq);
1448 case 0xa0: return FNIEMOP_CALL(iemOp_3Dnow_pfcmpgt_Pq_Qq);
1449 case 0xa4: return FNIEMOP_CALL(iemOp_3Dnow_pfmax_Pq_Qq);
1450 case 0xa6: return FNIEMOP_CALL(iemOp_3Dnow_pfrcpit1_Pq_Qq);
1451 case 0xa7: return FNIEMOP_CALL(iemOp_3Dnow_pfrsqit1_Pq_Qq);
1452 case 0xaa: return FNIEMOP_CALL(iemOp_3Dnow_pfsubr_Pq_Qq);
1453 case 0xae: return FNIEMOP_CALL(iemOp_3Dnow_pfacc_PQ_Qq);
1454 case 0xb0: return FNIEMOP_CALL(iemOp_3Dnow_pfcmpeq_Pq_Qq);
1455 case 0xb4: return FNIEMOP_CALL(iemOp_3Dnow_pfmul_Pq_Qq);
1456 case 0xb6: return FNIEMOP_CALL(iemOp_3Dnow_pfrcpit2_Pq_Qq);
1457 case 0xb7: return FNIEMOP_CALL(iemOp_3Dnow_pmulhrw_Pq_Qq);
1458 case 0xbb: return FNIEMOP_CALL(iemOp_3Dnow_pswapd_Pq_Qq);
1459 case 0xbf: return FNIEMOP_CALL(iemOp_3Dnow_pavgusb_PQ_Qq);
1460 default:
1461 return IEMOP_RAISE_INVALID_OPCODE();
1462 }
1463}
1464
1465
/* SSE/SSE2 move instructions 0x0f 0x10..0x17 - all unimplemented stubs.
   The long names enumerate the prefix-dependent variants (none/66/F3/F2). */

/** Opcode 0x0f 0x10. */
FNIEMOP_STUB(iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd);
/** Opcode 0x0f 0x11. */
FNIEMOP_STUB(iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd);
/** Opcode 0x0f 0x12. */
FNIEMOP_STUB(iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq); //NEXT
/** Opcode 0x0f 0x13. */
FNIEMOP_STUB(iemOp_movlps_Mq_Vq__movlpd_Mq_Vq); //NEXT
/** Opcode 0x0f 0x14. */
FNIEMOP_STUB(iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq);
/** Opcode 0x0f 0x15. */
FNIEMOP_STUB(iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq);
/** Opcode 0x0f 0x16. */
FNIEMOP_STUB(iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq); //NEXT
/** Opcode 0x0f 0x17. */
FNIEMOP_STUB(iemOp_movhps_Mq_Vq__movhpd_Mq_Vq); //NEXT
1482
1483
1484/** Opcode 0x0f 0x18. */
1485FNIEMOP_DEF(iemOp_prefetch_Grp16)
1486{
1487 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
1488 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
1489 {
1490 IEMOP_HLP_NO_LOCK_PREFIX();
1491 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
1492 {
1493 case 4: /* Aliased to /0 for the time being according to AMD. */
1494 case 5: /* Aliased to /0 for the time being according to AMD. */
1495 case 6: /* Aliased to /0 for the time being according to AMD. */
1496 case 7: /* Aliased to /0 for the time being according to AMD. */
1497 case 0: IEMOP_MNEMONIC("prefetchNTA m8"); break;
1498 case 1: IEMOP_MNEMONIC("prefetchT0 m8"); break;
1499 case 2: IEMOP_MNEMONIC("prefetchT1 m8"); break;
1500 case 3: IEMOP_MNEMONIC("prefetchT2 m8"); break;
1501 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1502 }
1503
1504 IEM_MC_BEGIN(0, 1);
1505 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
1506 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
1507 /* Currently a NOP. */
1508 IEM_MC_ADVANCE_RIP();
1509 IEM_MC_END();
1510 return VINF_SUCCESS;
1511 }
1512
1513 return IEMOP_RAISE_INVALID_OPCODE();
1514}
1515
1516
1517/** Opcode 0x0f 0x19..0x1f. */
1518FNIEMOP_DEF(iemOp_nop_Ev)
1519{
1520 IEMOP_HLP_NO_LOCK_PREFIX();
1521 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
1522 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
1523 {
1524 IEM_MC_BEGIN(0, 0);
1525 IEM_MC_ADVANCE_RIP();
1526 IEM_MC_END();
1527 }
1528 else
1529 {
1530 IEM_MC_BEGIN(0, 1);
1531 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
1532 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
1533 /* Currently a NOP. */
1534 IEM_MC_ADVANCE_RIP();
1535 IEM_MC_END();
1536 }
1537 return VINF_SUCCESS;
1538}
1539
1540
/** Opcode 0x0f 0x20 - MOV Rd,Cd (read control register). */
FNIEMOP_DEF(iemOp_mov_Rd_Cd)
{
    /* mod is ignored, as is operand size overrides. */
    IEMOP_MNEMONIC("mov Rd,Cd");
    /* The operand size is forced: 64-bit in long mode, 32-bit otherwise. */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
    else
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
    {
        /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
        if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
            return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */
        iCrReg |= 8;
    }
    /* Only CR0, CR2, CR3, CR4 and CR8 exist; the rest raise #UD. */
    switch (iCrReg)
    {
        case 0: case 2: case 3: case 4: case 8:
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
    IEMOP_HLP_DONE_DECODING();

    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Cd, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB, iCrReg);
}
1571
1572
/** Opcode 0x0f 0x21 - MOV Rd,Dd (read debug register).
 *  REX.R is invalid here: there are no DR8..DR15, so it raises \#UD. */
FNIEMOP_DEF(iemOp_mov_Rd_Dd)
{
    IEMOP_MNEMONIC("mov Rd,Dd");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
        return IEMOP_RAISE_INVALID_OPCODE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Dd,
                                   (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB,
                                   ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK));
}
1585
1586
/** Opcode 0x0f 0x22 - MOV Cd,Rd (write control register). */
FNIEMOP_DEF(iemOp_mov_Cd_Rd)
{
    /* mod is ignored, as is operand size overrides. */
    IEMOP_MNEMONIC("mov Cd,Rd");
    /* The operand size is forced: 64-bit in long mode, 32-bit otherwise. */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
    else
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
    {
        /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
        if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
            return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */
        iCrReg |= 8;
    }
    /* Only CR0, CR2, CR3, CR4 and CR8 exist; the rest raise #UD. */
    switch (iCrReg)
    {
        case 0: case 2: case 3: case 4: case 8:
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
    IEMOP_HLP_DONE_DECODING();

    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Cd_Rd, iCrReg, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}
1617
1618
/** Opcode 0x0f 0x23 - MOV Dd,Rd (write debug register).
 *  REX.R is invalid here: there are no DR8..DR15, so it raises \#UD. */
FNIEMOP_DEF(iemOp_mov_Dd_Rd)
{
    IEMOP_MNEMONIC("mov Dd,Rd");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
        return IEMOP_RAISE_INVALID_OPCODE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Dd_Rd,
                                   ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK),
                                   (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}
1631
1632
/** Opcode 0x0f 0x24 - MOV Rd,Td (test registers; removed after the 486,
 *  always \#UD here). */
FNIEMOP_DEF(iemOp_mov_Rd_Td)
{
    IEMOP_MNEMONIC("mov Rd,Td");
    /* The RM byte is not considered, see testcase. */
    return IEMOP_RAISE_INVALID_OPCODE();
}
1640
1641
/** Opcode 0x0f 0x26 - MOV Td,Rd (test registers; removed after the 486,
 *  always \#UD here). */
FNIEMOP_DEF(iemOp_mov_Td_Rd)
{
    IEMOP_MNEMONIC("mov Td,Rd");
    /* The RM byte is not considered, see testcase. */
    return IEMOP_RAISE_INVALID_OPCODE();
}
1649
1650
/* SSE/SSE2 instructions 0x0f 0x28..0x2f - all unimplemented stubs. */

/** Opcode 0x0f 0x28. */
FNIEMOP_STUB(iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd);
/** Opcode 0x0f 0x29. */
FNIEMOP_STUB(iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd);
/** Opcode 0x0f 0x2a. */
FNIEMOP_STUB(iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey); //NEXT
/** Opcode 0x0f 0x2b. */
FNIEMOP_STUB(iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd); //NEXT:XP
/** Opcode 0x0f 0x2c. */
FNIEMOP_STUB(iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd); //NEXT
/** Opcode 0x0f 0x2d. */
FNIEMOP_STUB(iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd);
/** Opcode 0x0f 0x2e. */
FNIEMOP_STUB(iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd); //NEXT
/** Opcode 0x0f 0x2f. */
FNIEMOP_STUB(iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd);
1667
1668
/** Opcode 0x0f 0x30 - WRMSR. */
FNIEMOP_DEF(iemOp_wrmsr)
{
    IEMOP_MNEMONIC("wrmsr");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_wrmsr);
}
1676
1677
/** Opcode 0x0f 0x31 - RDTSC. */
FNIEMOP_DEF(iemOp_rdtsc)
{
    IEMOP_MNEMONIC("rdtsc");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdtsc);
}
1685
1686
/** Opcode 0x0f 0x32 - RDMSR. (Comment previously said 0x33, which is RDPMC.) */
FNIEMOP_DEF(iemOp_rdmsr)
{
    IEMOP_MNEMONIC("rdmsr");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdmsr);
}
1694
1695
/** Opcode 0x0f 0x33 - RDPMC. (Comment previously said 0x34, which is SYSENTER.) */
FNIEMOP_STUB(iemOp_rdpmc);
/** Opcode 0x0f 0x34. */
FNIEMOP_STUB(iemOp_sysenter);
/** Opcode 0x0f 0x35. */
FNIEMOP_STUB(iemOp_sysexit);
/** Opcode 0x0f 0x37. */
FNIEMOP_STUB(iemOp_getsec);
/** Opcode 0x0f 0x38. */
FNIEMOP_UD_STUB(iemOp_3byte_Esc_A4); /* Here there be dragons... */
/** Opcode 0x0f 0x3a. */
FNIEMOP_UD_STUB(iemOp_3byte_Esc_A5); /* Here there be dragons... */
/** Opcode 0x0f 0x3c (?). */
FNIEMOP_STUB(iemOp_movnti_Gv_Ev);
1710
1711/**
1712 * Implements a conditional move.
1713 *
1714 * Wish there was an obvious way to do this where we could share and reduce
1715 * code bloat.
1716 *
1717 * @param a_Cnd The conditional "microcode" operation.
1718 */
1719#define CMOV_X(a_Cnd) \
1720 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); \
1721 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) \
1722 { \
1723 switch (pIemCpu->enmEffOpSize) \
1724 { \
1725 case IEMMODE_16BIT: \
1726 IEM_MC_BEGIN(0, 1); \
1727 IEM_MC_LOCAL(uint16_t, u16Tmp); \
1728 a_Cnd { \
1729 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
1730 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
1731 } IEM_MC_ENDIF(); \
1732 IEM_MC_ADVANCE_RIP(); \
1733 IEM_MC_END(); \
1734 return VINF_SUCCESS; \
1735 \
1736 case IEMMODE_32BIT: \
1737 IEM_MC_BEGIN(0, 1); \
1738 IEM_MC_LOCAL(uint32_t, u32Tmp); \
1739 a_Cnd { \
1740 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
1741 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
1742 } IEM_MC_ELSE() { \
1743 IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
1744 } IEM_MC_ENDIF(); \
1745 IEM_MC_ADVANCE_RIP(); \
1746 IEM_MC_END(); \
1747 return VINF_SUCCESS; \
1748 \
1749 case IEMMODE_64BIT: \
1750 IEM_MC_BEGIN(0, 1); \
1751 IEM_MC_LOCAL(uint64_t, u64Tmp); \
1752 a_Cnd { \
1753 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
1754 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
1755 } IEM_MC_ENDIF(); \
1756 IEM_MC_ADVANCE_RIP(); \
1757 IEM_MC_END(); \
1758 return VINF_SUCCESS; \
1759 \
1760 IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
1761 } \
1762 } \
1763 else \
1764 { \
1765 switch (pIemCpu->enmEffOpSize) \
1766 { \
1767 case IEMMODE_16BIT: \
1768 IEM_MC_BEGIN(0, 2); \
1769 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
1770 IEM_MC_LOCAL(uint16_t, u16Tmp); \
1771 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \
1772 IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
1773 a_Cnd { \
1774 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
1775 } IEM_MC_ENDIF(); \
1776 IEM_MC_ADVANCE_RIP(); \
1777 IEM_MC_END(); \
1778 return VINF_SUCCESS; \
1779 \
1780 case IEMMODE_32BIT: \
1781 IEM_MC_BEGIN(0, 2); \
1782 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
1783 IEM_MC_LOCAL(uint32_t, u32Tmp); \
1784 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \
1785 IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
1786 a_Cnd { \
1787 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
1788 } IEM_MC_ELSE() { \
1789 IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
1790 } IEM_MC_ENDIF(); \
1791 IEM_MC_ADVANCE_RIP(); \
1792 IEM_MC_END(); \
1793 return VINF_SUCCESS; \
1794 \
1795 case IEMMODE_64BIT: \
1796 IEM_MC_BEGIN(0, 2); \
1797 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
1798 IEM_MC_LOCAL(uint64_t, u64Tmp); \
1799 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \
1800 IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
1801 a_Cnd { \
1802 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
1803 } IEM_MC_ENDIF(); \
1804 IEM_MC_ADVANCE_RIP(); \
1805 IEM_MC_END(); \
1806 return VINF_SUCCESS; \
1807 \
1808 IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
1809 } \
1810 } do {} while (0)
1811
1812
1813
/** Opcode 0x0f 0x40 - CMOVO (move if OF=1). */
FNIEMOP_DEF(iemOp_cmovo_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovo Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF));
}


/** Opcode 0x0f 0x41 - CMOVNO (move if OF=0). */
FNIEMOP_DEF(iemOp_cmovno_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovno Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_OF));
}


/** Opcode 0x0f 0x42 - CMOVC/CMOVB (move if CF=1). */
FNIEMOP_DEF(iemOp_cmovc_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovc Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF));
}


/** Opcode 0x0f 0x43 - CMOVNC/CMOVAE (move if CF=0). */
FNIEMOP_DEF(iemOp_cmovnc_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnc Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_CF));
}


/** Opcode 0x0f 0x44 - CMOVE/CMOVZ (move if ZF=1). */
FNIEMOP_DEF(iemOp_cmove_Gv_Ev)
{
    IEMOP_MNEMONIC("cmove Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF));
}


/** Opcode 0x0f 0x45 - CMOVNE/CMOVNZ (move if ZF=0). */
FNIEMOP_DEF(iemOp_cmovne_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovne Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF));
}


/** Opcode 0x0f 0x46 - CMOVBE/CMOVNA (move if CF=1 or ZF=1). */
FNIEMOP_DEF(iemOp_cmovbe_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovbe Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF));
}


/** Opcode 0x0f 0x47 - CMOVNBE/CMOVA (move if CF=0 and ZF=0). */
FNIEMOP_DEF(iemOp_cmovnbe_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnbe Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_NO_BITS_SET(X86_EFL_CF | X86_EFL_ZF));
}


/** Opcode 0x0f 0x48 - CMOVS (move if SF=1). */
FNIEMOP_DEF(iemOp_cmovs_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovs Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF));
}


/** Opcode 0x0f 0x49 - CMOVNS (move if SF=0). */
FNIEMOP_DEF(iemOp_cmovns_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovns Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_SF));
}


/** Opcode 0x0f 0x4a - CMOVP/CMOVPE (move if PF=1). */
FNIEMOP_DEF(iemOp_cmovp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovp Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF));
}


/** Opcode 0x0f 0x4b - CMOVNP/CMOVPO (move if PF=0). */
FNIEMOP_DEF(iemOp_cmovnp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnp Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_PF));
}


/** Opcode 0x0f 0x4c - CMOVL/CMOVNGE (move if SF != OF). */
FNIEMOP_DEF(iemOp_cmovl_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovl Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4d - CMOVNL/CMOVGE (move if SF == OF). */
FNIEMOP_DEF(iemOp_cmovnl_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnl Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BITS_EQ(X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4e - CMOVLE/CMOVNG (move if ZF=1 or SF != OF). */
FNIEMOP_DEF(iemOp_cmovle_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovle Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4f - CMOVNLE/CMOVG (move if ZF=0 and SF == OF). */
FNIEMOP_DEF(iemOp_cmovnle_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnle Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF));
}

#undef CMOV_X
1942
/* SSE/SSE2 arithmetic and conversion instructions 0x0f 0x50..0x5f - all
   unimplemented stubs. */

/** Opcode 0x0f 0x50. */
FNIEMOP_STUB(iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd);
/** Opcode 0x0f 0x51. */
FNIEMOP_STUB(iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd);
/** Opcode 0x0f 0x52. */
FNIEMOP_STUB(iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss);
/** Opcode 0x0f 0x53. */
FNIEMOP_STUB(iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss);
/** Opcode 0x0f 0x54. */
FNIEMOP_STUB(iemOp_andps_Vps_Wps__andpd_Wpd_Vpd);
/** Opcode 0x0f 0x55. */
FNIEMOP_STUB(iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd);
/** Opcode 0x0f 0x56. */
FNIEMOP_STUB(iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd);
/** Opcode 0x0f 0x57. */
FNIEMOP_STUB(iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd);
/** Opcode 0x0f 0x58. */
FNIEMOP_STUB(iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd); //NEXT
/** Opcode 0x0f 0x59. */
FNIEMOP_STUB(iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd);//NEXT
/** Opcode 0x0f 0x5a. */
FNIEMOP_STUB(iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd);
/** Opcode 0x0f 0x5b. */
FNIEMOP_STUB(iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps);
/** Opcode 0x0f 0x5c. */
FNIEMOP_STUB(iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd);
/** Opcode 0x0f 0x5d. */
FNIEMOP_STUB(iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd);
/** Opcode 0x0f 0x5e. */
FNIEMOP_STUB(iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd);
/** Opcode 0x0f 0x5f. */
FNIEMOP_STUB(iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd);
1975
1976
1977/**
1978 * Common worker for SSE2 and MMX instructions on the forms:
1979 * pxxxx xmm1, xmm2/mem128
1980 * pxxxx mm1, mm2/mem32
1981 *
1982 * The 2nd operand is the first half of a register, which in the memory case
1983 * means a 32-bit memory access for MMX and 128-bit aligned 64-bit or 128-bit
1984 * memory accessed for MMX.
1985 *
1986 * Exceptions type 4.
1987 */
1988FNIEMOP_DEF_1(iemOpCommonMmxSse_LowLow_To_Full, PCIEMOPMEDIAF1L1, pImpl)
1989{
1990 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
1991 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
1992 {
1993 case IEM_OP_PRF_SIZE_OP: /* SSE */
1994 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
1995 {
1996 /*
1997 * Register, register.
1998 */
1999 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
2000 IEM_MC_BEGIN(2, 0);
2001 IEM_MC_ARG(uint128_t *, pDst, 0);
2002 IEM_MC_ARG(uint64_t const *, pSrc, 1);
2003 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
2004 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2005 IEM_MC_REF_XREG_U64_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2006 IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
2007 IEM_MC_ADVANCE_RIP();
2008 IEM_MC_END();
2009 }
2010 else
2011 {
2012 /*
2013 * Register, memory.
2014 */
2015 IEM_MC_BEGIN(2, 2);
2016 IEM_MC_ARG(uint128_t *, pDst, 0);
2017 IEM_MC_LOCAL(uint64_t, uSrc);
2018 IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1);
2019 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
2020
2021 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
2022 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
2023 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
2024 IEM_MC_FETCH_MEM_U64_ALIGN_U128(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
2025
2026 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2027 IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
2028
2029 IEM_MC_ADVANCE_RIP();
2030 IEM_MC_END();
2031 }
2032 return VINF_SUCCESS;
2033
2034 case 0: /* MMX */
2035 if (!pImpl->pfnU64)
2036 return IEMOP_RAISE_INVALID_OPCODE();
2037 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2038 {
2039 /*
2040 * Register, register.
2041 */
2042 /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
2043 /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
2044 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
2045 IEM_MC_BEGIN(2, 0);
2046 IEM_MC_ARG(uint64_t *, pDst, 0);
2047 IEM_MC_ARG(uint32_t const *, pSrc, 1);
2048 IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
2049 IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
2050 IEM_MC_REF_MREG_U32_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
2051 IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
2052 IEM_MC_ADVANCE_RIP();
2053 IEM_MC_END();
2054 }
2055 else
2056 {
2057 /*
2058 * Register, memory.
2059 */
2060 IEM_MC_BEGIN(2, 2);
2061 IEM_MC_ARG(uint64_t *, pDst, 0);
2062 IEM_MC_LOCAL(uint32_t, uSrc);
2063 IEM_MC_ARG_LOCAL_REF(uint32_t const *, pSrc, uSrc, 1);
2064 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
2065
2066 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
2067 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
2068 IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
2069 IEM_MC_FETCH_MEM_U32(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
2070
2071 IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
2072 IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
2073
2074 IEM_MC_ADVANCE_RIP();
2075 IEM_MC_END();
2076 }
2077 return VINF_SUCCESS;
2078
2079 default:
2080 return IEMOP_RAISE_INVALID_OPCODE();
2081 }
2082}
2083
2084
/** Opcode 0x0f 0x60. */
FNIEMOP_DEF(iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpcklbw");
    /* Byte interleave of the low halves; shared worker handles prefix dispatch. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklbw);
}
2091
2092
/** Opcode 0x0f 0x61. */
FNIEMOP_DEF(iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpcklwd"); /** @todo AMD mark the MMX version as 3DNow!. Intel says MMX CPUID req. */
    /* Word interleave of the low halves; shared worker handles prefix dispatch. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklwd);
}
2099
2100
/** Opcode 0x0f 0x62. */
FNIEMOP_DEF(iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckldq");
    /* Doubleword interleave of the low halves; shared worker handles prefix dispatch. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpckldq);
}
2107
2108
/* Pack / signed-compare MMX+SSE2 opcodes - not implemented yet. */
/** Opcode 0x0f 0x63. */
FNIEMOP_STUB(iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq);
/** Opcode 0x0f 0x64. */
FNIEMOP_STUB(iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq);
/** Opcode 0x0f 0x65. */
FNIEMOP_STUB(iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq);
/** Opcode 0x0f 0x66. */
FNIEMOP_STUB(iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq);
/** Opcode 0x0f 0x67. */
FNIEMOP_STUB(iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq);
2119
2120
/**
 * Common worker for SSE2 and MMX instructions on the forms:
 *      pxxxx xmm1, xmm2/mem128
 *      pxxxx mm1, mm2/mem64
 *
 * The 2nd operand is the second half of a register, which in the memory case
 * means a 64-bit memory access for MMX, and for SSE a 128-bit aligned access
 * where it may read the full 128 bits or only the upper 64 bits.
 *
 * Exceptions type 4.
 *
 * @param   pImpl   Table with the U64 (MMX) and U128 (SSE) worker functions;
 *                  a NULL pfnU64 marks the MMX form invalid (\#UD).
 */
FNIEMOP_DEF_1(iemOpCommonMmxSse_HighHigh_To_Full, PCIEMOPMEDIAF1H1, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* The mandatory prefix selects the form: 0x66 = SSE, none = MMX;
       F2/F3 make the opcode invalid. */
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint128_t *, pDst, 0);
                IEM_MC_ARG(uint128_t const *, pSrc, 1);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint128_t *, pDst, 0);
                IEM_MC_LOCAL(uint128_t, uSrc);
                IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc); /* Most CPUs probably only read the high qword. */

                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            if (!pImpl->pfnU64)
                return IEMOP_RAISE_INVALID_OPCODE();
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint64_t *, pDst, 0);
                IEM_MC_ARG(uint64_t const *, pSrc, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint64_t *, pDst, 0);
                IEM_MC_LOCAL(uint64_t, uSrc);
                IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);

                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
2227
2228
/** Opcode 0x0f 0x68. */
FNIEMOP_DEF(iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckhbw");
    /* Byte interleave of the high halves; shared worker handles prefix dispatch. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhbw);
}
2235
2236
/** Opcode 0x0f 0x69. */
FNIEMOP_DEF(iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckhwd");
    /* Word interleave of the high halves; shared worker handles prefix dispatch. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhwd);
}
2243
2244
/** Opcode 0x0f 0x6a. */
FNIEMOP_DEF(iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckhdq");
    /* Doubleword interleave of the high halves; shared worker handles prefix dispatch. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhdq);
}
2251
/** Opcode 0x0f 0x6b.
 * @note The 'packssdq' part of the identifier looks like a typo for
 *       'packssdw' - renaming would require touching the opcode map, so it is
 *       left as-is here. */
FNIEMOP_STUB(iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq);
2254
2255
/** Opcode 0x0f 0x6c. SSE2 only (no MMX form - the worker's pfnU64 is NULL). */
FNIEMOP_DEF(iemOp_punpcklqdq_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpcklqdq");
    /* Quadword interleave of the low halves. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklqdq);
}
2262
2263
/** Opcode 0x0f 0x6d. SSE2 only (no MMX form - the worker's pfnU64 is NULL). */
FNIEMOP_DEF(iemOp_punpckhqdq_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckhqdq");
    /* Quadword interleave of the high halves. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhqdq);
}
2270
2271
/** Opcode 0x0f 0x6e.
 * movd/movq from a general register or memory into an MMX or XMM register.
 * REX.W selects the 64-bit (movq) form; the XMM destination is zero extended
 * to 128 bits. */
FNIEMOP_DEF(iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* 0x66 = SSE (XMM destination), no prefix = MMX; F2/F3 are invalid. */
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            IEMOP_MNEMONIC("movd/q Wd/q,Ed/q");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* XMM, greg*/
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    /* movq: 64-bit GPR -> low qword, upper qword zeroed. */
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                    IEM_MC_STORE_XREG_U64_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
                }
                else
                {
                    /* movd: 32-bit GPR -> low dword, rest zeroed. */
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                    IEM_MC_STORE_XREG_U32_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* XMM, [mem] */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                    IEM_MC_STORE_XREG_U64_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                    IEM_MC_STORE_XREG_U32_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            IEMOP_MNEMONIC("movq/d Pd/q,Ed/q");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* MMX, greg */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                    IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                else
                    /* movd: 32-bit source zero extended to the 64-bit MMX register. */
                    IEM_MC_FETCH_GREG_U32_ZX_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* MMX, [mem] */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                    IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                    IEM_MC_STORE_MREG_U32_ZX_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
2372
2373
/** Opcode 0x0f 0x6f.
 * movq mm,mm/mem64 (no prefix), movdqa xmm,xmm/mem128 (0x66, aligned) and
 * movdqu xmm,xmm/mem128 (F3, unaligned) - register/memory to register loads. */
FNIEMOP_DEF(iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq)
{
    bool fAligned = false;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE aligned */
            fAligned = true;
            /* fall thru - aligned and unaligned forms share the code below. */
        case IEM_OP_PRF_REPZ: /* SSE unaligned */
            if (fAligned)
                IEMOP_MNEMONIC("movdqa Vdq,Wdq");
            else
                IEMOP_MNEMONIC("movdqu Vdq,Wdq");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint128_t, u128Tmp);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_FETCH_XREG_U128(u128Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u128Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint128_t, u128Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                /* Only movdqa (0x66) enforces 16-byte alignment. */
                if (fAligned)
                    IEM_MC_FETCH_MEM_U128_ALIGN_SSE(u128Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                else
                    IEM_MC_FETCH_MEM_U128(u128Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_STORE_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u128Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            IEMOP_MNEMONIC("movq Pq,Qq");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MREG_U64(u64Tmp, bRm & X86_MODRM_RM_MASK);
                IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
2467
2468
/** Opcode 0x0f 0x70. The immediate here is evil!
 * pshufw (no prefix, MMX-ext), pshufd (0x66), pshuflw (F2) and pshufhw (F3).
 * "Evil" because the Ib comes after the ModR/M bytes, so in the memory case
 * it must be read after calculating the effective address. */
FNIEMOP_DEF(iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        /* All three SSE variants share the decode structure; the inner switch
           picks the actual worker. */
        case IEM_OP_PRF_SIZE_OP: /* SSE */
        case IEM_OP_PRF_REPNZ: /* SSE */
        case IEM_OP_PRF_REPZ: /* SSE */
        {
            PFNIEMAIMPLMEDIAPSHUF pfnAImpl;
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
            {
                case IEM_OP_PRF_SIZE_OP:
                    IEMOP_MNEMONIC("pshufd Vdq,Wdq,Ib");
                    pfnAImpl = iemAImpl_pshufd;
                    break;
                case IEM_OP_PRF_REPNZ:
                    IEMOP_MNEMONIC("pshuflw Vdq,Wdq,Ib");
                    pfnAImpl = iemAImpl_pshuflw;
                    break;
                case IEM_OP_PRF_REPZ:
                    IEMOP_MNEMONIC("pshufhw Vdq,Wdq,Ib");
                    pfnAImpl = iemAImpl_pshufhw;
                    break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* macro supplies the default: label */
            }
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint128_t *, pDst, 0);
                IEM_MC_ARG(uint128_t const *, pSrc, 1);
                IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_SSE_AIMPL_3(pfnAImpl, pDst, pSrc, bEvilArg);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint128_t *, pDst, 0);
                IEM_MC_LOCAL(uint128_t, uSrc);
                IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                /* The immediate byte follows the ModR/M + displacement. */
                uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil);
                IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();

                IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_SSE_AIMPL_3(pfnAImpl, pDst, pSrc, bEvilArg);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
        }

        case 0: /* MMX Extension */
            IEMOP_MNEMONIC("pshufw Pq,Qq,Ib");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pDst, 0);
                IEM_MC_ARG(uint64_t const *, pSrc, 1);
                IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2);
                /* pshufw needs SSE or the AMD MMX extensions, not plain MMX. */
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT();
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
                IEM_MC_CALL_MMX_AIMPL_3(iemAImpl_pshufw, pDst, pSrc, bEvilArg);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pDst, 0);
                IEM_MC_LOCAL(uint64_t, uSrc);
                IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                /* The immediate byte follows the ModR/M + displacement. */
                uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil);
                IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT();

                IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_CALL_MMX_AIMPL_3(iemAImpl_pshufw, pDst, pSrc, bEvilArg);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
2593
2594
/* Group 12 word-shift-by-immediate forms (MMX Nq and SSE2 Udq) - stubs. */
/** Opcode 0x0f 0x71 11/2. */
FNIEMOP_STUB_1(iemOp_Grp12_psrlw_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x71 11/2. */
FNIEMOP_STUB_1(iemOp_Grp12_psrlw_Udq_Ib, uint8_t, bRm);

/** Opcode 0x0f 0x71 11/4. */
FNIEMOP_STUB_1(iemOp_Grp12_psraw_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x71 11/4. */
FNIEMOP_STUB_1(iemOp_Grp12_psraw_Udq_Ib, uint8_t, bRm);

/** Opcode 0x0f 0x71 11/6. */
FNIEMOP_STUB_1(iemOp_Grp12_psllw_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x71 11/6. */
FNIEMOP_STUB_1(iemOp_Grp12_psllw_Udq_Ib, uint8_t, bRm);
2612
2613
/** Opcode 0x0f 0x71.
 * Group 12: word shifts by immediate. Dispatches on the ModR/M reg field
 * (/2=psrlw, /4=psraw, /6=psllw) and the mandatory prefix (none=MMX,
 * 0x66=SSE2); everything else is \#UD. */
FNIEMOP_DEF(iemOp_Grp12)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* Only register forms exist; a memory operand is invalid. */
    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_OPCODE();
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: case 1: case 3: case 5: case 7:
            return IEMOP_RAISE_INVALID_OPCODE();
        case 2:
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp12_psrlw_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp12_psrlw_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 4:
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp12_psraw_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp12_psraw_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 6:
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp12_psllw_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp12_psllw_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
2648
2649
/* Group 13 doubleword-shift-by-immediate forms (MMX Nq and SSE2 Udq) - stubs. */
/** Opcode 0x0f 0x72 11/2. */
FNIEMOP_STUB_1(iemOp_Grp13_psrld_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x72 11/2. */
FNIEMOP_STUB_1(iemOp_Grp13_psrld_Udq_Ib, uint8_t, bRm);

/** Opcode 0x0f 0x72 11/4. */
FNIEMOP_STUB_1(iemOp_Grp13_psrad_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x72 11/4. */
FNIEMOP_STUB_1(iemOp_Grp13_psrad_Udq_Ib, uint8_t, bRm);

/** Opcode 0x0f 0x72 11/6. */
FNIEMOP_STUB_1(iemOp_Grp13_pslld_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x72 11/6. */
FNIEMOP_STUB_1(iemOp_Grp13_pslld_Udq_Ib, uint8_t, bRm);
2667
2668
/** Opcode 0x0f 0x72.
 * Group 13: doubleword shifts by immediate. Dispatches on the ModR/M reg
 * field (/2=psrld, /4=psrad, /6=pslld) and the mandatory prefix (none=MMX,
 * 0x66=SSE2); everything else is \#UD. */
FNIEMOP_DEF(iemOp_Grp13)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* Only register forms exist; a memory operand is invalid. */
    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_OPCODE();
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: case 1: case 3: case 5: case 7:
            return IEMOP_RAISE_INVALID_OPCODE();
        case 2:
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp13_psrld_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp13_psrld_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 4:
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp13_psrad_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp13_psrad_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 6:
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp13_pslld_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp13_pslld_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
2703
2704
/* Group 14 quadword/double-quadword-shift-by-immediate forms - stubs.
   The psrldq/pslldq byte-shift forms exist only with the 0x66 prefix. */
/** Opcode 0x0f 0x73 11/2. */
FNIEMOP_STUB_1(iemOp_Grp14_psrlq_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x73 11/2. */
FNIEMOP_STUB_1(iemOp_Grp14_psrlq_Udq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x73 11/3. */
FNIEMOP_STUB_1(iemOp_Grp14_psrldq_Udq_Ib, uint8_t, bRm); //NEXT

/** Opcode 0x0f 0x73 11/6. */
FNIEMOP_STUB_1(iemOp_Grp14_psllq_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x73 11/6. */
FNIEMOP_STUB_1(iemOp_Grp14_psllq_Udq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x73 11/7. */
FNIEMOP_STUB_1(iemOp_Grp14_pslldq_Udq_Ib, uint8_t, bRm); //NEXT
2722
2723
/** Opcode 0x0f 0x73.
 * Group 14: quadword shifts by immediate (/2=psrlq, /6=psllq; MMX or SSE2)
 * plus the SSE2-only byte shifts (/3=psrldq, /7=pslldq, 0x66 prefix only).
 * Everything else is \#UD. */
FNIEMOP_DEF(iemOp_Grp14)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* Only register forms exist; a memory operand is invalid. */
    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_OPCODE();
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: case 1: case 4: case 5:
            return IEMOP_RAISE_INVALID_OPCODE();
        case 2:
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp14_psrlq_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp14_psrlq_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 3:
            /* psrldq has no MMX form - 0x66 prefix required. */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp14_psrldq_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 6:
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case 0: return FNIEMOP_CALL_1(iemOp_Grp14_psllq_Nq_Ib, bRm);
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp14_psllq_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        case 7:
            /* pslldq has no MMX form - 0x66 prefix required. */
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
            {
                case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp14_pslldq_Udq_Ib, bRm);
                default: return IEMOP_RAISE_INVALID_OPCODE();
            }
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
2763
2764
/**
 * Common worker for SSE2 and MMX instructions on the forms:
 *      pxxx    mm1, mm2/mem64
 *      pxxx    xmm1, xmm2/mem128
 *
 * Both operands are used in full width (64-bit MMX, 128-bit SSE).
 * Proper alignment of the 128-bit operand is enforced.
 * Exceptions type 4. SSE2 and MMX cpuid checks.
 *
 * @param   pImpl   Table with the U64 (MMX) and U128 (SSE2) worker functions.
 */
FNIEMOP_DEF_1(iemOpCommonMmxSse2_FullFull_To_Full, PCIEMOPMEDIAF2, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* The mandatory prefix selects the form: 0x66 = SSE2, none = MMX;
       F2/F3 make the opcode invalid. */
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint128_t *, pDst, 0);
                IEM_MC_ARG(uint128_t const *, pSrc, 1);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint128_t *, pDst, 0);
                IEM_MC_LOCAL(uint128_t, uSrc);
                IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                /* 128-bit fetch with the 16-byte alignment requirement. */
                IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);

                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint64_t *, pDst, 0);
                IEM_MC_ARG(uint64_t const *, pSrc, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint64_t *, pDst, 0);
                IEM_MC_LOCAL(uint64_t, uSrc);
                IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);

                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
2866
2867
/** Opcode 0x0f 0x74. */
FNIEMOP_DEF(iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq)
{
    IEMOP_MNEMONIC("pcmpeqb");
    /* Packed byte equality compare; shared full-width worker handles dispatch. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pcmpeqb);
}
2874
2875
/** Opcode 0x0f 0x75. */
FNIEMOP_DEF(iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq)
{
    IEMOP_MNEMONIC("pcmpeqw");
    /* Packed word equality compare; shared full-width worker handles dispatch. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pcmpeqw);
}
2882
2883
/** Opcode 0x0f 0x76.
 * @note The function name says 'pcmped' - presumably a typo for 'pcmpeqd';
 *       renaming would require touching the opcode map, so it is left as-is. */
FNIEMOP_DEF(iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq)
{
    IEMOP_MNEMONIC("pcmpeqd");
    /* Packed doubleword equality compare; shared full-width worker handles dispatch. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pcmpeqd);
}
2890
2891
/* emms, VMX read/write (decoded as #UD here) and SSE3 horizontal ops - stubs. */
/** Opcode 0x0f 0x77. */
FNIEMOP_STUB(iemOp_emms);
/** Opcode 0x0f 0x78. */
FNIEMOP_UD_STUB(iemOp_vmread_AmdGrp17);
/** Opcode 0x0f 0x79. */
FNIEMOP_UD_STUB(iemOp_vmwrite);
/** Opcode 0x0f 0x7c. */
FNIEMOP_STUB(iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps);
/** Opcode 0x0f 0x7d. */
FNIEMOP_STUB(iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps);
2902
2903
/** Opcode 0x0f 0x7e.
 * movd/movq from an MMX or XMM register to a general register or memory.
 * REX.W selects the 64-bit (movq) form.
 * @note The F3-prefixed movq Vq,Wq form is not handled here (falls into the
 *       default \#UD path) - TODO confirm against the opcode map. */
FNIEMOP_DEF(iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* 0x66 = SSE (XMM source), no prefix = MMX. */
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            IEMOP_MNEMONIC("movd/q Ed/q,Wd/q");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* greg, XMM */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    /* movq: low qword of the XMM register -> 64-bit GPR. */
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_XREG_U64(u64Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                    IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
                }
                else
                {
                    /* movd: low dword of the XMM register -> 32-bit GPR. */
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_XREG_U32(u32Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                    IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* [mem], XMM */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_XREG_U64(u64Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                    IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffSrc, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_XREG_U32(u32Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                    IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffSrc, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            IEMOP_MNEMONIC("movq/d Ed/q,Pd/q");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* greg, MMX */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                    IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_MREG_U32(u32Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                    IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* [mem], MMX */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                    IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffSrc, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_MREG_U32(u32Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                    IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffSrc, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
3010
3011
/**
 * Opcode 0x0f 0x7f.
 *
 * Prefix-dispatched store of an SSE/MMX register:
 *  - 66h prefix: movdqa Wdq,Vdq (aligned 128-bit store),
 *  - F3h prefix: movdqu Wdq,Vdq (unaligned 128-bit store),
 *  - no prefix:  movq Qq,Pq (64-bit MMX store).
 * Any other prefix combination raises \#UD.
 */
FNIEMOP_DEF(iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    bool fAligned = false;
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE aligned */
            fAligned = true;
            /* fall thru - aligned and unaligned forms differ only in the memory store below. */
        case IEM_OP_PRF_REPZ: /* SSE unaligned */
            if (fAligned)
                IEMOP_MNEMONIC("movdqa Wdq,Vdq");
            else
                IEMOP_MNEMONIC("movdqu Wdq,Vdq");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint128_t, u128Tmp);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_FETCH_XREG_U128(u128Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_XREG_U128((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u128Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint128_t, u128Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_FETCH_XREG_U128(u128Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                if (fAligned)
                    IEM_MC_STORE_MEM_U128_ALIGN_SSE(pIemCpu->iEffSeg, GCPtrEffSrc, u128Tmp);
                else
                    IEM_MC_STORE_MEM_U128(pIemCpu->iEffSeg, GCPtrEffSrc, u128Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            IEMOP_MNEMONIC("movq Qq,Pq");

            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_STORE_MREG_U64(bRm & X86_MODRM_RM_MASK, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffSrc, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
3106
3107
3108
/** Opcode 0x0f 0x80.  jo Jv - jump near if OF=1. */
FNIEMOP_DEF(iemOp_jo_Jv)
{
    IEMOP_MNEMONIC("jo Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* 16-bit signed displacement with a 16-bit operand size, else 32-bit. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3142
3143
/** Opcode 0x0f 0x81.  jno Jv - jump near if OF=0. */
FNIEMOP_DEF(iemOp_jno_Jv)
{
    IEMOP_MNEMONIC("jno Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* 16-bit signed displacement with a 16-bit operand size, else 32-bit. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();       /* OF set: fall through to next instruction */
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm); /* OF clear: take the branch */
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3177
3178
/** Opcode 0x0f 0x82.  jc/jb/jnae Jv - jump near if CF=1. */
FNIEMOP_DEF(iemOp_jc_Jv)
{
    IEMOP_MNEMONIC("jc/jb/jnae Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* 16-bit signed displacement with a 16-bit operand size, else 32-bit. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3212
3213
/** Opcode 0x0f 0x83.  jnc/jnb/jae Jv - jump near if CF=0. */
FNIEMOP_DEF(iemOp_jnc_Jv)
{
    IEMOP_MNEMONIC("jnc/jnb/jae Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* 16-bit signed displacement with a 16-bit operand size, else 32-bit. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_ADVANCE_RIP();       /* CF set: not taken */
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm); /* CF clear: taken */
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3247
3248
/** Opcode 0x0f 0x84.  je/jz Jv - jump near if ZF=1. */
FNIEMOP_DEF(iemOp_je_Jv)
{
    IEMOP_MNEMONIC("je/jz Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* 16-bit signed displacement with a 16-bit operand size, else 32-bit. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3282
3283
/** Opcode 0x0f 0x85.  jne/jnz Jv - jump near if ZF=0. */
FNIEMOP_DEF(iemOp_jne_Jv)
{
    IEMOP_MNEMONIC("jne/jnz Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* 16-bit signed displacement with a 16-bit operand size, else 32-bit. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();       /* ZF set: not taken */
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm); /* ZF clear: taken */
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3317
3318
/** Opcode 0x0f 0x86.  jbe/jna Jv - jump near if CF=1 or ZF=1. */
FNIEMOP_DEF(iemOp_jbe_Jv)
{
    IEMOP_MNEMONIC("jbe/jna Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* 16-bit signed displacement with a 16-bit operand size, else 32-bit. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3352
3353
/** Opcode 0x0f 0x87.  jnbe/ja Jv - jump near if CF=0 and ZF=0. */
FNIEMOP_DEF(iemOp_jnbe_Jv)
{
    IEMOP_MNEMONIC("jnbe/ja Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* 16-bit signed displacement with a 16-bit operand size, else 32-bit. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();       /* CF or ZF set: not taken */
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm); /* both clear: taken */
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3387
3388
/** Opcode 0x0f 0x88.  js Jv - jump near if SF=1. */
FNIEMOP_DEF(iemOp_js_Jv)
{
    IEMOP_MNEMONIC("js Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* 16-bit signed displacement with a 16-bit operand size, else 32-bit. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3422
3423
/** Opcode 0x0f 0x89.  jns Jv - jump near if SF=0. */
FNIEMOP_DEF(iemOp_jns_Jv)
{
    IEMOP_MNEMONIC("jns Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* 16-bit signed displacement with a 16-bit operand size, else 32-bit. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_ADVANCE_RIP();       /* SF set: not taken */
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm); /* SF clear: taken */
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3457
3458
/** Opcode 0x0f 0x8a.  jp/jpe Jv - jump near if PF=1. */
FNIEMOP_DEF(iemOp_jp_Jv)
{
    IEMOP_MNEMONIC("jp Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* 16-bit signed displacement with a 16-bit operand size, else 32-bit. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3492
3493
3494/** Opcode 0x0f 0x8b. */
3495FNIEMOP_DEF(iemOp_jnp_Jv)
3496{
3497 IEMOP_MNEMONIC("jo Jv");
3498 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
3499 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
3500 {
3501 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
3502 IEMOP_HLP_NO_LOCK_PREFIX();
3503
3504 IEM_MC_BEGIN(0, 0);
3505 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
3506 IEM_MC_ADVANCE_RIP();
3507 } IEM_MC_ELSE() {
3508 IEM_MC_REL_JMP_S16(i16Imm);
3509 } IEM_MC_ENDIF();
3510 IEM_MC_END();
3511 }
3512 else
3513 {
3514 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
3515 IEMOP_HLP_NO_LOCK_PREFIX();
3516
3517 IEM_MC_BEGIN(0, 0);
3518 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
3519 IEM_MC_ADVANCE_RIP();
3520 } IEM_MC_ELSE() {
3521 IEM_MC_REL_JMP_S32(i32Imm);
3522 } IEM_MC_ENDIF();
3523 IEM_MC_END();
3524 }
3525 return VINF_SUCCESS;
3526}
3527
3528
/** Opcode 0x0f 0x8c.  jl/jnge Jv - jump near if SF != OF. */
FNIEMOP_DEF(iemOp_jl_Jv)
{
    IEMOP_MNEMONIC("jl/jnge Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* 16-bit signed displacement with a 16-bit operand size, else 32-bit. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3562
3563
/** Opcode 0x0f 0x8d.  jnl/jge Jv - jump near if SF == OF. */
FNIEMOP_DEF(iemOp_jnl_Jv)
{
    IEMOP_MNEMONIC("jnl/jge Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* 16-bit signed displacement with a 16-bit operand size, else 32-bit. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();       /* SF != OF: not taken */
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm); /* SF == OF: taken */
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3597
3598
/** Opcode 0x0f 0x8e.  jle/jng Jv - jump near if ZF=1 or SF != OF. */
FNIEMOP_DEF(iemOp_jle_Jv)
{
    IEMOP_MNEMONIC("jle/jng Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* 16-bit signed displacement with a 16-bit operand size, else 32-bit. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3632
3633
/** Opcode 0x0f 0x8f.  jnle/jg Jv - jump near if ZF=0 and SF == OF. */
FNIEMOP_DEF(iemOp_jnle_Jv)
{
    IEMOP_MNEMONIC("jnle/jg Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* 16-bit signed displacement with a 16-bit operand size, else 32-bit. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();       /* ZF set or SF != OF: not taken */
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm); /* ZF clear and SF == OF: taken */
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3667
3668
/** Opcode 0x0f 0x90.  seto Eb - set byte to 1 if OF=1, else 0. */
FNIEMOP_DEF(iemOp_seto_Eb)
{
    IEMOP_MNEMONIC("seto Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3707
3708
/** Opcode 0x0f 0x91.  setno Eb - set byte to 1 if OF=0, else 0. */
FNIEMOP_DEF(iemOp_setno_Eb)
{
    IEMOP_MNEMONIC("setno Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3747
3748
/** Opcode 0x0f 0x92.  setc Eb - set byte to 1 if CF=1, else 0. */
FNIEMOP_DEF(iemOp_setc_Eb)
{
    IEMOP_MNEMONIC("setc Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3787
3788
/** Opcode 0x0f 0x93.  setnc Eb - set byte to 1 if CF=0, else 0. */
FNIEMOP_DEF(iemOp_setnc_Eb)
{
    IEMOP_MNEMONIC("setnc Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3827
3828
/** Opcode 0x0f 0x94.  sete Eb - set byte to 1 if ZF=1, else 0. */
FNIEMOP_DEF(iemOp_sete_Eb)
{
    IEMOP_MNEMONIC("sete Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3867
3868
/** Opcode 0x0f 0x95.  setne Eb - set byte to 1 if ZF=0, else 0. */
FNIEMOP_DEF(iemOp_setne_Eb)
{
    IEMOP_MNEMONIC("setne Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3907
3908
/** Opcode 0x0f 0x96.  setbe Eb - set byte to 1 if CF=1 or ZF=1, else 0. */
FNIEMOP_DEF(iemOp_setbe_Eb)
{
    IEMOP_MNEMONIC("setbe Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3947
3948
/** Opcode 0x0f 0x97.  setnbe Eb - set byte to 1 if CF=0 and ZF=0, else 0. */
FNIEMOP_DEF(iemOp_setnbe_Eb)
{
    IEMOP_MNEMONIC("setnbe Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3987
3988
/** Opcode 0x0f 0x98.  sets Eb - set byte to 1 if SF=1, else 0. */
FNIEMOP_DEF(iemOp_sets_Eb)
{
    IEMOP_MNEMONIC("sets Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4027
4028
/** Opcode 0x0f 0x99.  setns Eb - set byte to 1 if SF=0, else 0. */
FNIEMOP_DEF(iemOp_setns_Eb)
{
    IEMOP_MNEMONIC("setns Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4067
4068
4069/** Opcode 0x0f 0x9a. */
4070FNIEMOP_DEF(iemOp_setp_Eb)
4071{
4072 IEMOP_MNEMONIC("setnp Eb");
4073 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
4074 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
4075
4076 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
4077 * any way. AMD says it's "unused", whatever that means. We're
4078 * ignoring for now. */
4079 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
4080 {
4081 /* register target */
4082 IEM_MC_BEGIN(0, 0);
4083 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
4084 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
4085 } IEM_MC_ELSE() {
4086 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
4087 } IEM_MC_ENDIF();
4088 IEM_MC_ADVANCE_RIP();
4089 IEM_MC_END();
4090 }
4091 else
4092 {
4093 /* memory target */
4094 IEM_MC_BEGIN(0, 1);
4095 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4096 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4097 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
4098 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
4099 } IEM_MC_ELSE() {
4100 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
4101 } IEM_MC_ENDIF();
4102 IEM_MC_ADVANCE_RIP();
4103 IEM_MC_END();
4104 }
4105 return VINF_SUCCESS;
4106}
4107
4108
/** Opcode 0x0f 0x9b - SETNP Eb: set byte to 1 if the parity flag is clear, else 0. */
FNIEMOP_DEF(iemOp_setnp_Eb)
{
    IEMOP_MNEMONIC("setnp Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target - inverted sense of SETP: PF set => 0, clear => 1. */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4147
4148
/** Opcode 0x0f 0x9c - SETL Eb: set byte to 1 if less (SF != OF), else 0. */
FNIEMOP_DEF(iemOp_setl_Eb)
{
    IEMOP_MNEMONIC("setl Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target - 'less' is signed: SF differs from OF. */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4187
4188
/** Opcode 0x0f 0x9d - SETNL/SETGE Eb: set byte to 1 if not less (SF == OF), else 0. */
FNIEMOP_DEF(iemOp_setnl_Eb)
{
    IEMOP_MNEMONIC("setnl Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target - inverted sense of SETL: SF != OF => 0, equal => 1. */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4227
4228
/** Opcode 0x0f 0x9e - SETLE Eb: set byte to 1 if less or equal (ZF set, or SF != OF), else 0. */
FNIEMOP_DEF(iemOp_setle_Eb)
{
    IEMOP_MNEMONIC("setle Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target - 'less or equal' = ZF==1 || SF!=OF. */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4267
4268
/** Opcode 0x0f 0x9f - SETNLE/SETG Eb: set byte to 1 if greater (ZF clear and SF == OF), else 0. */
FNIEMOP_DEF(iemOp_setnle_Eb)
{
    IEMOP_MNEMONIC("setnle Eb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means. We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target - inverted sense of SETLE: condition true => 0, false => 1. */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4307
4308
/**
 * Common 'push segment-register' helper.
 *
 * Pushes the 16-bit selector value of @a iReg, zero extended to the current
 * effective operand size.  FS/GS pushes are allowed in 64-bit mode; the
 * legacy segment registers (ES/CS/SS/DS) raise \#UD there (IEMOP_HLP_NO_64BIT).
 *
 * @param   iReg    The segment register index (X86_SREG_XXX).
 */
FNIEMOP_DEF_1(iemOpCommonPushSReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (iReg < X86_SREG_FS)
        IEMOP_HLP_NO_64BIT();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint16_t, u16Value);
            IEM_MC_FETCH_SREG_U16(u16Value, iReg);
            IEM_MC_PUSH_U16(u16Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_SREG_ZX_U32(u32Value, iReg);
            /* Note: dedicated sreg push variant (not plain IEM_MC_PUSH_U32). */
            IEM_MC_PUSH_U32_SREG(u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_SREG_ZX_U64(u64Value, iReg);
            IEM_MC_PUSH_U64(u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
    }

    return VINF_SUCCESS;
}
4351
4352
/** Opcode 0x0f 0xa0 - PUSH FS.  Delegates to the common segment-push helper. */
FNIEMOP_DEF(iemOp_push_fs)
{
    IEMOP_MNEMONIC("push fs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_FS);
}
4360
4361
/** Opcode 0x0f 0xa1 - POP FS.  Deferred to a C implementation (segment loads fault). */
FNIEMOP_DEF(iemOp_pop_fs)
{
    IEMOP_MNEMONIC("pop fs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_FS, pIemCpu->enmEffOpSize);
}
4369
4370
/** Opcode 0x0f 0xa2 - CPUID.  Deferred entirely to the C implementation. */
FNIEMOP_DEF(iemOp_cpuid)
{
    IEMOP_MNEMONIC("cpuid");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cpuid);
}
4378
4379
4380/**
4381 * Common worker for iemOp_bt_Ev_Gv, iemOp_btc_Ev_Gv, iemOp_btr_Ev_Gv and
4382 * iemOp_bts_Ev_Gv.
4383 */
4384FNIEMOP_DEF_1(iemOpCommonBit_Ev_Gv, PCIEMOPBINSIZES, pImpl)
4385{
4386 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
4387 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
4388
4389 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
4390 {
4391 /* register destination. */
4392 IEMOP_HLP_NO_LOCK_PREFIX();
4393 switch (pIemCpu->enmEffOpSize)
4394 {
4395 case IEMMODE_16BIT:
4396 IEM_MC_BEGIN(3, 0);
4397 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4398 IEM_MC_ARG(uint16_t, u16Src, 1);
4399 IEM_MC_ARG(uint32_t *, pEFlags, 2);
4400
4401 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4402 IEM_MC_AND_LOCAL_U16(u16Src, 0xf);
4403 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4404 IEM_MC_REF_EFLAGS(pEFlags);
4405 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
4406
4407 IEM_MC_ADVANCE_RIP();
4408 IEM_MC_END();
4409 return VINF_SUCCESS;
4410
4411 case IEMMODE_32BIT:
4412 IEM_MC_BEGIN(3, 0);
4413 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4414 IEM_MC_ARG(uint32_t, u32Src, 1);
4415 IEM_MC_ARG(uint32_t *, pEFlags, 2);
4416
4417 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4418 IEM_MC_AND_LOCAL_U32(u32Src, 0x1f);
4419 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4420 IEM_MC_REF_EFLAGS(pEFlags);
4421 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
4422
4423 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
4424 IEM_MC_ADVANCE_RIP();
4425 IEM_MC_END();
4426 return VINF_SUCCESS;
4427
4428 case IEMMODE_64BIT:
4429 IEM_MC_BEGIN(3, 0);
4430 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4431 IEM_MC_ARG(uint64_t, u64Src, 1);
4432 IEM_MC_ARG(uint32_t *, pEFlags, 2);
4433
4434 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4435 IEM_MC_AND_LOCAL_U64(u64Src, 0x3f);
4436 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4437 IEM_MC_REF_EFLAGS(pEFlags);
4438 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
4439
4440 IEM_MC_ADVANCE_RIP();
4441 IEM_MC_END();
4442 return VINF_SUCCESS;
4443
4444 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4445 }
4446 }
4447 else
4448 {
4449 /* memory destination. */
4450
4451 uint32_t fAccess;
4452 if (pImpl->pfnLockedU16)
4453 fAccess = IEM_ACCESS_DATA_RW;
4454 else /* BT */
4455 {
4456 IEMOP_HLP_NO_LOCK_PREFIX();
4457 fAccess = IEM_ACCESS_DATA_R;
4458 }
4459
4460 NOREF(fAccess);
4461
4462 /** @todo test negative bit offsets! */
4463 switch (pIemCpu->enmEffOpSize)
4464 {
4465 case IEMMODE_16BIT:
4466 IEM_MC_BEGIN(3, 2);
4467 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4468 IEM_MC_ARG(uint16_t, u16Src, 1);
4469 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
4470 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4471 IEM_MC_LOCAL(int16_t, i16AddrAdj);
4472
4473 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4474 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4475 IEM_MC_ASSIGN(i16AddrAdj, u16Src);
4476 IEM_MC_AND_ARG_U16(u16Src, 0x0f);
4477 IEM_MC_SAR_LOCAL_S16(i16AddrAdj, 4);
4478 IEM_MC_SAR_LOCAL_S16(i16AddrAdj, 1);
4479 IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(GCPtrEffDst, i16AddrAdj);
4480 IEM_MC_FETCH_EFLAGS(EFlags);
4481
4482 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4483 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4484 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
4485 else
4486 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
4487 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
4488
4489 IEM_MC_COMMIT_EFLAGS(EFlags);
4490 IEM_MC_ADVANCE_RIP();
4491 IEM_MC_END();
4492 return VINF_SUCCESS;
4493
4494 case IEMMODE_32BIT:
4495 IEM_MC_BEGIN(3, 2);
4496 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4497 IEM_MC_ARG(uint32_t, u32Src, 1);
4498 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
4499 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4500 IEM_MC_LOCAL(int32_t, i32AddrAdj);
4501
4502 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4503 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4504 IEM_MC_ASSIGN(i32AddrAdj, u32Src);
4505 IEM_MC_AND_ARG_U32(u32Src, 0x1f);
4506 IEM_MC_SAR_LOCAL_S32(i32AddrAdj, 5);
4507 IEM_MC_SHL_LOCAL_S32(i32AddrAdj, 2);
4508 IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(GCPtrEffDst, i32AddrAdj);
4509 IEM_MC_FETCH_EFLAGS(EFlags);
4510
4511 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4512 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4513 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
4514 else
4515 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
4516 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
4517
4518 IEM_MC_COMMIT_EFLAGS(EFlags);
4519 IEM_MC_ADVANCE_RIP();
4520 IEM_MC_END();
4521 return VINF_SUCCESS;
4522
4523 case IEMMODE_64BIT:
4524 IEM_MC_BEGIN(3, 2);
4525 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4526 IEM_MC_ARG(uint64_t, u64Src, 1);
4527 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
4528 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4529 IEM_MC_LOCAL(int64_t, i64AddrAdj);
4530
4531 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4532 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4533 IEM_MC_ASSIGN(i64AddrAdj, u64Src);
4534 IEM_MC_AND_ARG_U64(u64Src, 0x3f);
4535 IEM_MC_SAR_LOCAL_S64(i64AddrAdj, 6);
4536 IEM_MC_SHL_LOCAL_S64(i64AddrAdj, 3);
4537 IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(GCPtrEffDst, i64AddrAdj);
4538 IEM_MC_FETCH_EFLAGS(EFlags);
4539
4540 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4541 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4542 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
4543 else
4544 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
4545 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
4546
4547 IEM_MC_COMMIT_EFLAGS(EFlags);
4548 IEM_MC_ADVANCE_RIP();
4549 IEM_MC_END();
4550 return VINF_SUCCESS;
4551
4552 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4553 }
4554 }
4555}
4556
4557
4558/** Opcode 0x0f 0xa3. */
4559FNIEMOP_DEF(iemOp_bt_Ev_Gv)
4560{
4561 IEMOP_MNEMONIC("bt Gv,Gv");
4562 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bt);
4563}
4564
4565
/**
 * Common worker for iemOp_shrd_Ev_Gv_Ib and iemOp_shld_Ev_Gv_Ib.
 *
 * Double-precision shift with an immediate byte shift count.  For a memory
 * destination the immediate follows the ModR/M bytes, so effective-address
 * calculation is told one immediate byte is still outstanding (cbImm = 1).
 *
 * @param   pImpl   The shld/shrd implementation table.
 */
FNIEMOP_DEF_1(iemOpCommonShldShrd_Ib, PCIEMOPSHIFTDBLSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* AF and OF are undefined after SHLD/SHRD. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: the immediate comes right after ModR/M. */
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEMOP_HLP_NO_LOCK_PREFIX();

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);

                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); /* 32-bit writes zero the upper half in 64-bit mode */
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG(uint8_t, cShiftArg, 2);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* cbImm=1: an immediate byte follows the addressing bytes. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG(uint8_t, cShiftArg, 2);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG(uint8_t, cShiftArg, 2);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
4710
4711
/**
 * Common worker for iemOp_shrd_Ev_Gv_CL and iemOp_shld_Ev_Gv_CL.
 *
 * Double-precision shift with the shift count taken from the CL register
 * (fetched via X86_GREG_xCX).
 *
 * @param   pImpl   The shld/shrd implementation table.
 */
FNIEMOP_DEF_1(iemOpCommonShldShrd_CL, PCIEMOPSHIFTDBLSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* AF and OF are undefined after SHLD/SHRD. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG(uint8_t, cShiftArg, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count = CL */
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG(uint8_t, cShiftArg, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);

                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); /* 32-bit writes zero the upper half in 64-bit mode */
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG(uint8_t, cShiftArg, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG(uint8_t, cShiftArg, 2);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG(uint8_t, cShiftArg, 2);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG(uint8_t, cShiftArg, 2);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
4855
4856
4857
/** Opcode 0x0f 0xa4 - SHLD Ev,Gv,Ib: left double shift, immediate count. */
FNIEMOP_DEF(iemOp_shld_Ev_Gv_Ib)
{
    IEMOP_MNEMONIC("shld Ev,Gv,Ib");
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shld);
}
4864
4865
/** Opcode 0x0f 0xa5 - SHLD Ev,Gv,CL: left double shift, count in CL.
 *  (Comment fixed: previously said 0xa7; SHLD Ev,Gv,CL encodes as 0x0f 0xa5.) */
FNIEMOP_DEF(iemOp_shld_Ev_Gv_CL)
{
    IEMOP_MNEMONIC("shld Ev,Gv,CL");
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shld);
}
4872
4873
/** Opcode 0x0f 0xa8 - PUSH GS.  Delegates to the common segment-push helper. */
FNIEMOP_DEF(iemOp_push_gs)
{
    IEMOP_MNEMONIC("push gs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_GS);
}
4881
4882
/** Opcode 0x0f 0xa9 - POP GS.  Deferred to a C implementation (segment loads fault). */
FNIEMOP_DEF(iemOp_pop_gs)
{
    IEMOP_MNEMONIC("pop gs");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_GS, pIemCpu->enmEffOpSize);
}
4890
4891
/** Opcode 0x0f 0xaa - RSM (resume from system management mode); not implemented yet. */
FNIEMOP_STUB(iemOp_rsm);
4894
4895
/** Opcode 0x0f 0xab - BTS Ev,Gv: test bit and set it, CF gets the old value. */
FNIEMOP_DEF(iemOp_bts_Ev_Gv)
{
    IEMOP_MNEMONIC("bts Ev,Gv");
    return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bts);
}
4902
4903
/** Opcode 0x0f 0xac - SHRD Ev,Gv,Ib: right double shift, immediate count. */
FNIEMOP_DEF(iemOp_shrd_Ev_Gv_Ib)
{
    IEMOP_MNEMONIC("shrd Ev,Gv,Ib");
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shrd);
}
4910
4911
/** Opcode 0x0f 0xad - SHRD Ev,Gv,CL: right double shift, count in CL. */
FNIEMOP_DEF(iemOp_shrd_Ev_Gv_CL)
{
    IEMOP_MNEMONIC("shrd Ev,Gv,CL");
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shrd);
}
4918
4919
/** Opcode 0x0f 0xae mem/0 - FXSAVE m512: save FPU/MMX/SSE state.
 *  Raises \#UD when the FXSR CPUID feature bit is absent; the heavy lifting
 *  is deferred to iemCImpl_fxsave. */
FNIEMOP_DEF_1(iemOp_Grp15_fxsave, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fxsave m512");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_FXSR))
        return IEMOP_RAISE_INVALID_OPCODE();

    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg,/*=*/pIemCpu->iEffSeg, 0);
    IEM_MC_ARG(RTGCPTR, GCPtrEff, 1);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
    IEM_MC_CALL_CIMPL_3(iemCImpl_fxsave, iEffSeg, GCPtrEff, enmEffOpSize);
    IEM_MC_END();
    return VINF_SUCCESS;
}
4937
4938
/** Opcode 0x0f 0xae mem/1 - FXRSTOR m512: restore FPU/MMX/SSE state.
 *  Raises \#UD when the FXSR CPUID feature bit is absent; the heavy lifting
 *  is deferred to iemCImpl_fxrstor. */
FNIEMOP_DEF_1(iemOp_Grp15_fxrstor, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fxrstor m512");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_FXSR))
        return IEMOP_RAISE_INVALID_OPCODE();

    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg,/*=*/pIemCpu->iEffSeg, 0);
    IEM_MC_ARG(RTGCPTR, GCPtrEff, 1);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
    IEM_MC_CALL_CIMPL_3(iemCImpl_fxrstor, iEffSeg, GCPtrEff, enmEffOpSize);
    IEM_MC_END();
    return VINF_SUCCESS;
}
4956
4957
/* Remaining group 15 memory forms.  FNIEMOP_STUB_1 marks unimplemented
   decoders; FNIEMOP_UD_STUB_1 marks forms currently treated as \#UD. */

/** Opcode 0x0f 0xae mem/2 - LDMXCSR; not implemented yet. */
FNIEMOP_STUB_1(iemOp_Grp15_ldmxcsr, uint8_t, bRm);

/** Opcode 0x0f 0xae mem/3 - STMXCSR; not implemented yet. */
FNIEMOP_STUB_1(iemOp_Grp15_stmxcsr, uint8_t, bRm);

/** Opcode 0x0f 0xae mem/4 - XSAVE; currently raises \#UD. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_xsave, uint8_t, bRm);

/** Opcode 0x0f 0xae mem/5 - XRSTOR; currently raises \#UD. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_xrstor, uint8_t, bRm);

/** Opcode 0x0f 0xae mem/6 - XSAVEOPT; currently raises \#UD. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_xsaveopt, uint8_t, bRm);

/** Opcode 0x0f 0xae mem/7 - CLFLUSH; not implemented yet. */
FNIEMOP_STUB_1(iemOp_Grp15_clflush, uint8_t, bRm);
4975
4976
/** Opcode 0x0f 0xae 11b/5 - LFENCE.
 *  Raises \#UD without guest SSE2; uses the real LFENCE on an SSE2-capable
 *  host, otherwise a generic fallback fence. */
FNIEMOP_DEF_1(iemOp_Grp15_lfence, uint8_t, bRm)
{
    IEMOP_MNEMONIC("lfence");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
        return IEMOP_RAISE_INVALID_OPCODE();

    IEM_MC_BEGIN(0, 0);
    if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_lfence);
    else
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
4994
4995
/** Opcode 0x0f 0xae 11b/6 - MFENCE.
 *  Raises \#UD without guest SSE2; uses the real MFENCE on an SSE2-capable
 *  host, otherwise a generic fallback fence. */
FNIEMOP_DEF_1(iemOp_Grp15_mfence, uint8_t, bRm)
{
    IEMOP_MNEMONIC("mfence");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
        return IEMOP_RAISE_INVALID_OPCODE();

    IEM_MC_BEGIN(0, 0);
    if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_mfence);
    else
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
5013
5014
/** Opcode 0x0f 0xae 11b/7 - SFENCE.
 *  Raises \#UD without guest SSE2; uses the real SFENCE on an SSE2-capable
 *  host, otherwise a generic fallback fence. */
FNIEMOP_DEF_1(iemOp_Grp15_sfence, uint8_t, bRm)
{
    IEMOP_MNEMONIC("sfence");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
        return IEMOP_RAISE_INVALID_OPCODE();

    IEM_MC_BEGIN(0, 0);
    if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_sfence);
    else
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
5032
5033
/* F3-prefixed group 15 register forms (FSGSBASE); all currently raise \#UD. */

/** Opcode 0xf3 0x0f 0xae 11b/0 - RDFSBASE; currently raises \#UD. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_rdfsbase, uint8_t, bRm);

/** Opcode 0xf3 0x0f 0xae 11b/1 - RDGSBASE; currently raises \#UD. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_rdgsbase, uint8_t, bRm);

/** Opcode 0xf3 0x0f 0xae 11b/2 - WRFSBASE; currently raises \#UD. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_wrfsbase, uint8_t, bRm);

/** Opcode 0xf3 0x0f 0xae 11b/3 - WRGSBASE; currently raises \#UD. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_wrgsbase, uint8_t, bRm);
5045
5046
/** Opcode 0x0f 0xae - group 15 dispatcher.
 *  Memory forms select on the ModR/M reg field; register (11b) forms select
 *  first on the prefix combination, then on the reg field. */
FNIEMOP_DEF(iemOp_Grp15)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Memory forms: fxsave/fxrstor/ldmxcsr/stmxcsr/xsave/xrstor/xsaveopt/clflush. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_Grp15_fxsave, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_Grp15_fxrstor, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_Grp15_ldmxcsr, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_Grp15_stmxcsr, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_Grp15_xsave, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_Grp15_xrstor, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_Grp15_xsaveopt,bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_Grp15_clflush, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Register forms depend on the repeat/size/lock prefixes. */
        switch (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_LOCK))
        {
            case 0:
                /* No prefix: only the fence instructions (/5../7) are valid. */
                switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
                {
                    case 0: return IEMOP_RAISE_INVALID_OPCODE();
                    case 1: return IEMOP_RAISE_INVALID_OPCODE();
                    case 2: return IEMOP_RAISE_INVALID_OPCODE();
                    case 3: return IEMOP_RAISE_INVALID_OPCODE();
                    case 4: return IEMOP_RAISE_INVALID_OPCODE();
                    case 5: return FNIEMOP_CALL_1(iemOp_Grp15_lfence, bRm);
                    case 6: return FNIEMOP_CALL_1(iemOp_Grp15_mfence, bRm);
                    case 7: return FNIEMOP_CALL_1(iemOp_Grp15_sfence, bRm);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reachable - all cases above return */

            case IEM_OP_PRF_REPZ:
                /* F3 prefix: FSGSBASE family (/0../3). */
                switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
                {
                    case 0: return FNIEMOP_CALL_1(iemOp_Grp15_rdfsbase, bRm);
                    case 1: return FNIEMOP_CALL_1(iemOp_Grp15_rdgsbase, bRm);
                    case 2: return FNIEMOP_CALL_1(iemOp_Grp15_wrfsbase, bRm);
                    case 3: return FNIEMOP_CALL_1(iemOp_Grp15_wrgsbase, bRm);
                    case 4: return IEMOP_RAISE_INVALID_OPCODE();
                    case 5: return IEMOP_RAISE_INVALID_OPCODE();
                    case 6: return IEMOP_RAISE_INVALID_OPCODE();
                    case 7: return IEMOP_RAISE_INVALID_OPCODE();
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reachable - all cases above return */

            default:
                return IEMOP_RAISE_INVALID_OPCODE();
        }
    }
}
5105
5106
/**
 * Opcode 0x0f 0xaf - imul Gv,Ev (two-operand form).
 */
FNIEMOP_DEF(iemOp_imul_Gv_Ev)
{
    IEMOP_MNEMONIC("imul Gv,Ev");
    /* SF, ZF, AF and PF are undefined after IMUL; tell the verification code. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_imul_two);
}
5114
5115
/**
 * Opcode 0x0f 0xb0 - cmpxchg Eb,Gb.
 *
 * Compares AL with the destination; the assembly worker performs the
 * conditional exchange and updates EFLAGS.  The memory form honours the
 * LOCK prefix by dispatching to the locked worker.
 */
FNIEMOP_DEF(iemOp_cmpxchg_Eb_Gb)
{
    IEMOP_MNEMONIC("cmpxchg Eb,Gb");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination. */
        IEMOP_HLP_DONE_DECODING();
        IEM_MC_BEGIN(4, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t *, pu8Al, 1);
        IEM_MC_ARG(uint8_t, u8Src, 2);
        IEM_MC_ARG(uint32_t *, pEFlags, 3);

        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U8(pu8Al, X86_GREG_xAX);
        IEM_MC_REF_EFLAGS(pEFlags);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u8, pu8Dst, pu8Al, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u8_locked, pu8Dst, pu8Al, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* Memory destination: map it read/write and work on a local copy of AL,
           which is written back to the register afterwards. */
        IEM_MC_BEGIN(4, 3);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t *, pu8Al, 1);
        IEM_MC_ARG(uint8_t, u8Src, 2);
        IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_LOCAL(uint8_t, u8Al);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DONE_DECODING();
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_GREG_U8(u8Al, X86_GREG_xAX);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_REF_LOCAL(pu8Al, u8Al);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u8, pu8Dst, pu8Al, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u8_locked, pu8Dst, pu8Al, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Al);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
5173
/**
 * Opcode 0x0f 0xb1 - cmpxchg Ev,Gv.
 *
 * Compares rAX with the destination operand; the assembly worker performs
 * the conditional exchange and updates EFLAGS.  The memory forms honour the
 * LOCK prefix.  On 32-bit hosts (RT_ARCH_X86) the 64-bit source is passed by
 * reference instead of by value.
 */
FNIEMOP_DEF(iemOp_cmpxchg_Ev_Gv)
{
    IEMOP_MNEMONIC("cmpxchg Ev,Gv");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination. */
        IEMOP_HLP_DONE_DECODING();
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t *, pu16Ax, 1);
                IEM_MC_ARG(uint16_t, u16Src, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U16(pu16Ax, X86_GREG_xAX);
                IEM_MC_REF_EFLAGS(pEFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u16, pu16Dst, pu16Ax, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u16_locked, pu16Dst, pu16Ax, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t *, pu32Eax, 1);
                IEM_MC_ARG(uint32_t, u32Src, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U32(pu32Eax, X86_GREG_xAX);
                IEM_MC_REF_EFLAGS(pEFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u32, pu32Dst, pu32Eax, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u32_locked, pu32Dst, pu32Eax, u32Src, pEFlags);

                /* NOTE(review): the high dwords of both EAX and the destination
                   are cleared unconditionally, i.e. even when the respective
                   register was not logically written by the compare-exchange;
                   presumably this matches 64-bit hardware behaviour for 32-bit
                   CMPXCHG -- confirm against SDM/testcases. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Eax);
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t *, pu64Rax, 1);
#ifdef RT_ARCH_X86
                IEM_MC_ARG(uint64_t *, pu64Src, 2);
#else
                IEM_MC_ARG(uint64_t, u64Src, 2);
#endif
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U64(pu64Rax, X86_GREG_xAX);
                IEM_MC_REF_EFLAGS(pEFlags);
#ifdef RT_ARCH_X86
                IEM_MC_REF_GREG_U64(pu64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64, pu64Dst, pu64Rax, pu64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64_locked, pu64Dst, pu64Rax, pu64Src, pEFlags);
#else
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64, pu64Dst, pu64Rax, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64_locked, pu64Dst, pu64Rax, u64Src, pEFlags);
#endif

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory destination: map it read/write and work on a local copy of
           rAX, stored back to the register after the operation. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 3);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t *, pu16Ax, 1);
                IEM_MC_ARG(uint16_t, u16Src, 2);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_LOCAL(uint16_t, u16Ax);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEMOP_HLP_DONE_DECODING();
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U16(u16Ax, X86_GREG_xAX);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_REF_LOCAL(pu16Ax, u16Ax);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u16, pu16Dst, pu16Ax, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u16_locked, pu16Dst, pu16Ax, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Ax);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 3);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t *, pu32Eax, 1);
                IEM_MC_ARG(uint32_t, u32Src, 2);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_LOCAL(uint32_t, u32Eax);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEMOP_HLP_DONE_DECODING();
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U32(u32Eax, X86_GREG_xAX);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_REF_LOCAL(pu32Eax, u32Eax);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u32, pu32Dst, pu32Eax, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u32_locked, pu32Dst, pu32Eax, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Eax);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 3);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t *, pu64Rax, 1);
#ifdef RT_ARCH_X86
                IEM_MC_ARG(uint64_t *, pu64Src, 2);
#else
                IEM_MC_ARG(uint64_t, u64Src, 2);
#endif
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_LOCAL(uint64_t, u64Rax);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEMOP_HLP_DONE_DECODING();
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_FETCH_GREG_U64(u64Rax, X86_GREG_xAX);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_REF_LOCAL(pu64Rax, u64Rax);
#ifdef RT_ARCH_X86
                IEM_MC_REF_GREG_U64(pu64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64, pu64Dst, pu64Rax, pu64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64_locked, pu64Dst, pu64Rax, pu64Src, pEFlags);
#else
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64, pu64Dst, pu64Rax, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64_locked, pu64Dst, pu64Rax, u64Src, pEFlags);
#endif

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Rax);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
5366
5367
/**
 * Common worker for LSS/LFS/LGS: loads a far pointer (offset + selector) from
 * memory into the given segment register and general register.
 *
 * Only memory forms are valid; the caller must have rejected mod==3.
 * The actual segment/register loading is done by iemCImpl_load_SReg_Greg.
 *
 * @param   iSegReg     The segment register to load (X86_SREG_XXX).
 * @param   bRm         The ModR/M byte (memory form).
 */
FNIEMOP_DEF_2(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg, uint8_t, bRm)
{
    Assert((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT)); /* Caller checks this */
    uint8_t const iGReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            /* 16-bit offset followed by the 16-bit selector at +2. */
            IEM_MC_BEGIN(5, 1);
            IEM_MC_ARG(uint16_t, uSel, 0);
            IEM_MC_ARG(uint16_t, offSeg, 1);
            IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
            IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
            IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
            IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 2);
            IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            /* 32-bit offset followed by the 16-bit selector at +4. */
            IEM_MC_BEGIN(5, 1);
            IEM_MC_ARG(uint16_t, uSel, 0);
            IEM_MC_ARG(uint32_t, offSeg, 1);
            IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
            IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
            IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
            IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 4);
            IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            /* 64-bit offset followed by the 16-bit selector at +8. */
            IEM_MC_BEGIN(5, 1);
            IEM_MC_ARG(uint16_t, uSel, 0);
            IEM_MC_ARG(uint64_t, offSeg, 1);
            IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
            IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
            IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
            if (IEM_IS_GUEST_CPU_AMD(pIemCpu)) /** @todo testcase: rev 3.15 of the amd manuals claims it only loads a 32-bit greg. */
                IEM_MC_FETCH_MEM_U32_SX_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            else
                IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 8);
            IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
5429
5430
5431/** Opcode 0x0f 0xb2. */
5432FNIEMOP_DEF(iemOp_lss_Gv_Mp)
5433{
5434 IEMOP_MNEMONIC("lss Gv,Mp");
5435 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
5436 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5437 return IEMOP_RAISE_INVALID_OPCODE();
5438 return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_SS, bRm);
5439}
5440
5441
/**
 * Opcode 0x0f 0xb3 - btr Ev,Gv (bit test and reset).
 */
FNIEMOP_DEF(iemOp_btr_Ev_Gv)
{
    IEMOP_MNEMONIC("btr Ev,Gv");
    return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btr);
}
5448
5449
5450/** Opcode 0x0f 0xb4. */
5451FNIEMOP_DEF(iemOp_lfs_Gv_Mp)
5452{
5453 IEMOP_MNEMONIC("lfs Gv,Mp");
5454 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
5455 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5456 return IEMOP_RAISE_INVALID_OPCODE();
5457 return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_FS, bRm);
5458}
5459
5460
5461/** Opcode 0x0f 0xb5. */
5462FNIEMOP_DEF(iemOp_lgs_Gv_Mp)
5463{
5464 IEMOP_MNEMONIC("lgs Gv,Mp");
5465 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
5466 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5467 return IEMOP_RAISE_INVALID_OPCODE();
5468 return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_GS, bRm);
5469}
5470
5471
/**
 * Opcode 0x0f 0xb6 - movzx Gv,Eb.
 *
 * Zero-extends a byte register or memory operand into a 16/32/64-bit
 * general register.
 */
FNIEMOP_DEF(iemOp_movzx_Gv_Eb)
{
    IEMOP_MNEMONIC("movzx Gv,Eb");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U8_ZX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U8_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U8_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_ZX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
5561
5562
/**
 * Opcode 0x0f 0xb7 - movzx Gv,Ew.
 *
 * Zero-extends a word register or memory operand into a 32/64-bit general
 * register.  The 16-bit operand size is folded into the 32-bit path.
 */
FNIEMOP_DEF(iemOp_movzx_Gv_Ew)
{
    IEMOP_MNEMONIC("movzx Gv,Ew");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /** @todo Not entirely sure how the operand size prefix is handled here,
     *        assuming that it will be ignored. Would be nice to have a few
     *        test for this. */
    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_GREG_U16_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_GREG_U16_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
            IEM_MC_FETCH_MEM_U16_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
            IEM_MC_FETCH_MEM_U16_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    return VINF_SUCCESS;
}
5628
5629
/** Opcode 0x0f 0xb8 - popcnt Gv,Ev (F3 prefixed) / jmpe.  Not implemented
 *  yet; FNIEMOP_STUB generates a placeholder handler. */
FNIEMOP_STUB(iemOp_popcnt_Gv_Ev_jmpe);
5632
5633
/**
 * Opcode 0x0f 0xb9 - group 10 (presumably the UD1 opcode - confirm).
 *
 * Always raises \#UD.
 */
FNIEMOP_DEF(iemOp_Grp10)
{
    Log(("iemOp_Grp10 -> #UD\n"));
    return IEMOP_RAISE_INVALID_OPCODE();
}
5640
5641
/**
 * Opcode 0x0f 0xba - group 8: bt/bts/btr/btc Ev,Ib.
 *
 * Selects the worker table from the /reg field (/0../3 are \#UD), then
 * handles register and memory destinations.  The immediate bit offset is
 * masked to the operand width (0x0f/0x1f/0x3f).  BT (no locked worker) only
 * needs read access to memory; the others map read/write and honour LOCK.
 */
FNIEMOP_DEF(iemOp_Grp8)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPBINSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: case 1: case 2: case 3:
            return IEMOP_RAISE_INVALID_OPCODE();
        case 4: pImpl = &g_iemAImpl_bt; IEMOP_MNEMONIC("bt Ev,Ib"); break;
        case 5: pImpl = &g_iemAImpl_bts; IEMOP_MNEMONIC("bts Ev,Ib"); break;
        case 6: pImpl = &g_iemAImpl_btr; IEMOP_MNEMONIC("btr Ev,Ib"); break;
        case 7: pImpl = &g_iemAImpl_btc; IEMOP_MNEMONIC("btc Ev,Ib"); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register destination. */
        uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
        IEMOP_HLP_NO_LOCK_PREFIX();

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u8Bit & 0x0f, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u8Bit & 0x1f, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                /* NOTE(review): the high dword is cleared even for /4 (BT),
                   which does not write its destination on real hardware;
                   verify this does not diverge from native behaviour. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u8Bit & 0x3f, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory destination. */

        /* BT has no locked worker and only reads; the writing ops need RW. */
        uint32_t fAccess;
        if (pImpl->pfnLockedU16)
            fAccess = IEM_ACCESS_DATA_RW;
        else /* BT */
        {
            IEMOP_HLP_NO_LOCK_PREFIX();
            fAccess = IEM_ACCESS_DATA_R;
        }

        /** @todo test negative bit offsets! */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* The trailing 1 accounts for the imm8 still to be fetched;
                   presumably needed for RIP-relative addressing - confirm. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
                IEM_MC_ASSIGN(u16Src, u8Bit & 0x0f);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);

                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
                IEM_MC_ASSIGN(u32Src, u8Bit & 0x1f);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);

                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
                IEM_MC_ASSIGN(u64Src, u8Bit & 0x3f);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);

                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }

}
5803
5804
/**
 * Opcode 0x0f 0xbb - btc Ev,Gv (bit test and complement).
 */
FNIEMOP_DEF(iemOp_btc_Ev_Gv)
{
    IEMOP_MNEMONIC("btc Ev,Gv");
    return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btc);
}
5811
5812
/**
 * Opcode 0x0f 0xbc - bsf Gv,Ev (bit scan forward).
 */
FNIEMOP_DEF(iemOp_bsf_Gv_Ev)
{
    IEMOP_MNEMONIC("bsf Gv,Ev");
    /* All flags except ZF are undefined after BSF. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsf);
}
5820
5821
/**
 * Opcode 0x0f 0xbd - bsr Gv,Ev (bit scan reverse).
 */
FNIEMOP_DEF(iemOp_bsr_Gv_Ev)
{
    IEMOP_MNEMONIC("bsr Gv,Ev");
    /* All flags except ZF are undefined after BSR. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsr);
}
5829
5830
/**
 * Opcode 0x0f 0xbe - movsx Gv,Eb.
 *
 * Sign-extends a byte register or memory operand into a 16/32/64-bit
 * general register.
 */
FNIEMOP_DEF(iemOp_movsx_Gv_Eb)
{
    IEMOP_MNEMONIC("movsx Gv,Eb");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U8_SX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U8_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U8_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_SX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
5920
5921
/**
 * Opcode 0x0f 0xbf - movsx Gv,Ew.
 *
 * Sign-extends a word register or memory operand into a 32/64-bit general
 * register.  The 16-bit operand size is folded into the 32-bit path.
 */
FNIEMOP_DEF(iemOp_movsx_Gv_Ew)
{
    IEMOP_MNEMONIC("movsx Gv,Ew");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /** @todo Not entirely sure how the operand size prefix is handled here,
     *        assuming that it will be ignored. Would be nice to have a few
     *        test for this. */
    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_GREG_U16_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_GREG_U16_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
            IEM_MC_FETCH_MEM_U16_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
            IEM_MC_FETCH_MEM_U16_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    return VINF_SUCCESS;
}
5987
5988
/** Opcode 0x0f 0xc0. */
FNIEMOP_DEF(iemOp_xadd_Eb_Gb)
{
    /* XADD Eb,Gb: exchange the byte destination with the byte register and
       store the sum in the destination; EFLAGS set as for ADD. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_MNEMONIC("xadd Eb,Gb");

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: LOCK is invalid here. */
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t *,  pu8Reg,  1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8, pu8Dst, pu8Reg, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         */
        /* The assembly worker swaps thru pu8Reg, so the register operand is
           copied to a local first and written back after the memory commit. */
        IEM_MC_BEGIN(3, 3);
        IEM_MC_ARG(uint8_t *,  pu8Dst,          0);
        IEM_MC_ARG(uint8_t *,  pu8Reg,          1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(uint8_t,  u8RegCopy);
        IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U8(u8RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_LOCAL(pu8Reg, u8RegCopy);
        IEM_MC_FETCH_EFLAGS(EFlags);
        /* LOCK prefix is legal for the memory form; pick the locked worker. */
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8, pu8Dst, pu8Reg, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8_locked, pu8Dst, pu8Reg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8RegCopy);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
        return VINF_SUCCESS;
    }
    /* Reached via the register path only (the memory path returned above). */
    return VINF_SUCCESS;
}
6046
6047
/** Opcode 0x0f 0xc1. */
FNIEMOP_DEF(iemOp_xadd_Ev_Gv)
{
    /* XADD Ev,Gv: exchange destination with the source register and store
       the sum in the destination; one case per effective operand size. */
    IEMOP_MNEMONIC("xadd Ev,Gv");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: LOCK is invalid here. */
        IEMOP_HLP_NO_LOCK_PREFIX();

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst,  0);
                IEM_MC_ARG(uint16_t *, pu16Reg,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16, pu16Dst, pu16Reg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst,  0);
                IEM_MC_ARG(uint32_t *, pu32Reg,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags);

                /* 32-bit GPR writes zero the upper halves of both 64-bit registers. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Reg);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst,  0);
                IEM_MC_ARG(uint64_t *, pu64Reg,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64, pu64Dst, pu64Reg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're accessing memory.
         */
        /* Same pattern as the byte form: the register operand is copied to a
           local, the worker updates it, and it's stored back after commit.
           LOCK prefix selects the locked assembly worker. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 3);
                IEM_MC_ARG(uint16_t *, pu16Dst,         0);
                IEM_MC_ARG(uint16_t *, pu16Reg,         1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16RegCopy);
                IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U16(u16RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_LOCAL(pu16Reg, u16RegCopy);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16, pu16Dst, pu16Reg, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16_locked, pu16Dst, pu16Reg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16RegCopy);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 3);
                IEM_MC_ARG(uint32_t *, pu32Dst,         0);
                IEM_MC_ARG(uint32_t *, pu32Reg,         1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32RegCopy);
                IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U32(u32RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_LOCAL(pu32Reg, u32RegCopy);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32_locked, pu32Dst, pu32Reg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                /* IEM_MC_STORE_GREG_U32 handles clearing the high dword here. */
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32RegCopy);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 3);
                IEM_MC_ARG(uint64_t *, pu64Dst,         0);
                IEM_MC_ARG(uint64_t *, pu64Reg,         1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64RegCopy);
                IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U64(u64RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_LOCAL(pu64Reg, u64RegCopy);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64, pu64Dst, pu64Reg, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64_locked, pu64Dst, pu64Reg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64RegCopy);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
6199
/* Opcodes 0x0f 0xc2..0xc6 are declared via FNIEMOP_STUB, i.e. decoding is
   not implemented yet (presumably asserts/fails at runtime — see the macro
   definition for the exact behavior). */
/** Opcode 0x0f 0xc2. */
FNIEMOP_STUB(iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib);

/** Opcode 0x0f 0xc3. */
FNIEMOP_STUB(iemOp_movnti_My_Gy);

/** Opcode 0x0f 0xc4. */
FNIEMOP_STUB(iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib);

/** Opcode 0x0f 0xc5. */
FNIEMOP_STUB(iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib);

/** Opcode 0x0f 0xc6. */
FNIEMOP_STUB(iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib);
6214
6215
/** Opcode 0x0f 0xc7 !11/1. */
FNIEMOP_DEF_1(iemOp_Grp9_cmpxchg8b_Mq, uint8_t, bRm)
{
    /* CMPXCHG8B m64: the assembly worker gets the mapped memory operand, an
       EDX:EAX pair and an ECX:EBX pair packed as RTUINT64U locals. */
    IEMOP_MNEMONIC("cmpxchg8b Mq");

    IEM_MC_BEGIN(4, 3);
    IEM_MC_ARG(uint64_t *, pu64MemDst,     0);
    IEM_MC_ARG(PRTUINT64U, pu64EaxEdx,     1);
    IEM_MC_ARG(PRTUINT64U, pu64EbxEcx,     2);
    IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 3);
    IEM_MC_LOCAL(RTUINT64U, u64EaxEdx);
    IEM_MC_LOCAL(RTUINT64U, u64EbxEcx);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

    /* Decode the effective address before declaring decoding done, then map
       the destination read/write. */
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING();
    IEM_MC_MEM_MAP(pu64MemDst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);

    /* Compare value: EDX:EAX. */
    IEM_MC_FETCH_GREG_U32(u64EaxEdx.s.Lo, X86_GREG_xAX);
    IEM_MC_FETCH_GREG_U32(u64EaxEdx.s.Hi, X86_GREG_xDX);
    IEM_MC_REF_LOCAL(pu64EaxEdx, u64EaxEdx);

    /* Replacement value: ECX:EBX. */
    IEM_MC_FETCH_GREG_U32(u64EbxEcx.s.Lo, X86_GREG_xBX);
    IEM_MC_FETCH_GREG_U32(u64EbxEcx.s.Hi, X86_GREG_xCX);
    IEM_MC_REF_LOCAL(pu64EbxEcx, u64EbxEcx);

    IEM_MC_FETCH_EFLAGS(EFlags);
    /* LOCK prefix selects the locked assembly worker. */
    if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
        IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg8b, pu64MemDst, pu64EaxEdx, pu64EbxEcx, pEFlags);
    else
        IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg8b_locked, pu64MemDst, pu64EaxEdx, pu64EbxEcx, pEFlags);

    IEM_MC_MEM_COMMIT_AND_UNMAP(pu64MemDst, IEM_ACCESS_DATA_RW);
    IEM_MC_COMMIT_EFLAGS(EFlags);
    /* On mismatch (ZF clear) write the u64EaxEdx pair — presumably updated by
       the worker to hold the memory value — back into EAX and EDX. */
    IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF)
        /** @todo Testcase: Check effect of cmpxchg8b on bits 63:32 in rax and rdx. */
        IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u64EaxEdx.s.Lo);
        IEM_MC_STORE_GREG_U32(X86_GREG_xDX, u64EaxEdx.s.Hi);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
6260
6261
/* Group 9 sub-opcodes declared via FNIEMOP_UD_STUB_1: placeholders taking the
   ModRM byte which presumably raise an invalid-opcode exception — see the
   macro definition for the exact behavior. */
/** Opcode REX.W 0x0f 0xc7 !11/1. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_cmpxchg16b_Mdq, uint8_t, bRm);

/** Opcode 0x0f 0xc7 11/6. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_rdrand_Rv, uint8_t, bRm);

/** Opcode 0x0f 0xc7 !11/6. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_vmptrld_Mq, uint8_t, bRm);

/** Opcode 0x66 0x0f 0xc7 !11/6. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_vmclear_Mq, uint8_t, bRm);

/** Opcode 0xf3 0x0f 0xc7 !11/6. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_vmxon_Mq, uint8_t, bRm);

/** Opcode [0xf3] 0x0f 0xc7 !11/7. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_vmptrst_Mq, uint8_t, bRm);
6279
6280
6281/** Opcode 0x0f 0xc7. */
6282FNIEMOP_DEF(iemOp_Grp9)
6283{
6284 /** @todo Testcase: Check mixing 0x66 and 0xf3. Check the effect of 0xf2. */
6285 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6286 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
6287 {
6288 case 0: case 2: case 3: case 4: case 5:
6289 return IEMOP_RAISE_INVALID_OPCODE();
6290 case 1:
6291 /** @todo Testcase: Check prefix effects on cmpxchg8b/16b. */
6292 if ( (bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)
6293 || (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ))) /** @todo Testcase: AMD seems to express a different idea here wrt prefixes. */
6294 return IEMOP_RAISE_INVALID_OPCODE();
6295 if (bRm & IEM_OP_PRF_SIZE_REX_W)
6296 return FNIEMOP_CALL_1(iemOp_Grp9_cmpxchg16b_Mdq, bRm);
6297 return FNIEMOP_CALL_1(iemOp_Grp9_cmpxchg8b_Mq, bRm);
6298 case 6:
6299 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6300 return FNIEMOP_CALL_1(iemOp_Grp9_rdrand_Rv, bRm);
6301 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ))
6302 {
6303 case 0:
6304 return FNIEMOP_CALL_1(iemOp_Grp9_vmptrld_Mq, bRm);
6305 case IEM_OP_PRF_SIZE_OP:
6306 return FNIEMOP_CALL_1(iemOp_Grp9_vmclear_Mq, bRm);
6307 case IEM_OP_PRF_REPZ:
6308 return FNIEMOP_CALL_1(iemOp_Grp9_vmxon_Mq, bRm);
6309 default:
6310 return IEMOP_RAISE_INVALID_OPCODE();
6311 }
6312 case 7:
6313 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ))
6314 {
6315 case 0:
6316 case IEM_OP_PRF_REPZ:
6317 return FNIEMOP_CALL_1(iemOp_Grp9_vmptrst_Mq, bRm);
6318 default:
6319 return IEMOP_RAISE_INVALID_OPCODE();
6320 }
6321 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6322 }
6323}
6324
6325
6326/**
6327 * Common 'bswap register' helper.
6328 */
6329FNIEMOP_DEF_1(iemOpCommonBswapGReg, uint8_t, iReg)
6330{
6331 IEMOP_HLP_NO_LOCK_PREFIX();
6332 switch (pIemCpu->enmEffOpSize)
6333 {
6334 case IEMMODE_16BIT:
6335 IEM_MC_BEGIN(1, 0);
6336 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6337 IEM_MC_REF_GREG_U32(pu32Dst, iReg); /* Don't clear the high dword! */
6338 IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_bswap_u16, pu32Dst);
6339 IEM_MC_ADVANCE_RIP();
6340 IEM_MC_END();
6341 return VINF_SUCCESS;
6342
6343 case IEMMODE_32BIT:
6344 IEM_MC_BEGIN(1, 0);
6345 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6346 IEM_MC_REF_GREG_U32(pu32Dst, iReg);
6347 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
6348 IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_bswap_u32, pu32Dst);
6349 IEM_MC_ADVANCE_RIP();
6350 IEM_MC_END();
6351 return VINF_SUCCESS;
6352
6353 case IEMMODE_64BIT:
6354 IEM_MC_BEGIN(1, 0);
6355 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6356 IEM_MC_REF_GREG_U64(pu64Dst, iReg);
6357 IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_bswap_u64, pu64Dst);
6358 IEM_MC_ADVANCE_RIP();
6359 IEM_MC_END();
6360 return VINF_SUCCESS;
6361
6362 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6363 }
6364}
6365
6366
/** Opcode 0x0f 0xc8. */
FNIEMOP_DEF(iemOp_bswap_rAX_r8)
{
    IEMOP_MNEMONIC("bswap rAX/r8");
    /* Note! Intel manuals states that R8-R15 can be accessed by using a REX.X
             prefix.  REX.B is the correct prefix it appears.  For a parallel
             case, see iemOp_mov_AL_Ib and iemOp_mov_eAX_Iv. */
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xAX | pIemCpu->uRexB);
}
6376
6377
/** Opcode 0x0f 0xc9. */
FNIEMOP_DEF(iemOp_bswap_rCX_r9)
{
    IEMOP_MNEMONIC("bswap rCX/r9");
    /* REX.B selects r9 instead of rCX. */
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xCX | pIemCpu->uRexB);
}
6384
6385
6386/** Opcode 0x0f 0xca. */
6387FNIEMOP_DEF(iemOp_bswap_rDX_r10)
6388{
6389 IEMOP_MNEMONIC("bswap rDX/r9");
6390 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDX | pIemCpu->uRexB);
6391}
6392
6393
6394/** Opcode 0x0f 0xcb. */
6395FNIEMOP_DEF(iemOp_bswap_rBX_r11)
6396{
6397 IEMOP_MNEMONIC("bswap rBX/r9");
6398 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBX | pIemCpu->uRexB);
6399}
6400
6401
/** Opcode 0x0f 0xcc. */
FNIEMOP_DEF(iemOp_bswap_rSP_r12)
{
    IEMOP_MNEMONIC("bswap rSP/r12");
    /* REX.B selects r12 instead of rSP. */
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSP | pIemCpu->uRexB);
}
6408
6409
/** Opcode 0x0f 0xcd. */
FNIEMOP_DEF(iemOp_bswap_rBP_r13)
{
    IEMOP_MNEMONIC("bswap rBP/r13");
    /* REX.B selects r13 instead of rBP. */
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBP | pIemCpu->uRexB);
}
6416
6417
/** Opcode 0x0f 0xce. */
FNIEMOP_DEF(iemOp_bswap_rSI_r14)
{
    IEMOP_MNEMONIC("bswap rSI/r14");
    /* REX.B selects r14 instead of rSI. */
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSI | pIemCpu->uRexB);
}
6424
6425
/** Opcode 0x0f 0xcf. */
FNIEMOP_DEF(iemOp_bswap_rDI_r15)
{
    IEMOP_MNEMONIC("bswap rDI/r15");
    /* REX.B selects r15 instead of rDI. */
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDI | pIemCpu->uRexB);
}
6432
6433
6434
/* Opcodes 0x0f 0xd0..0xd6: FNIEMOP_STUB placeholders, decoding not yet
   implemented. */
/** Opcode 0x0f 0xd0. */
FNIEMOP_STUB(iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps);
/** Opcode 0x0f 0xd1. */
FNIEMOP_STUB(iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq);
/** Opcode 0x0f 0xd2. */
FNIEMOP_STUB(iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq);
/** Opcode 0x0f 0xd3. */
FNIEMOP_STUB(iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq);
/** Opcode 0x0f 0xd4. */
FNIEMOP_STUB(iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq);
/** Opcode 0x0f 0xd5. */
FNIEMOP_STUB(iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq);
/** Opcode 0x0f 0xd6. */
FNIEMOP_STUB(iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq);
6449
6450
6451/** Opcode 0x0f 0xd7. */
6452FNIEMOP_DEF(iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq)
6453{
6454 /* Docs says register only. */
6455 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6456 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT)) /** @todo test that this is registers only. */
6457 return IEMOP_RAISE_INVALID_OPCODE();
6458
6459 /* Note! Taking the lazy approch here wrt the high 32-bits of the GREG. */
6460 /** @todo testcase: Check that the instruction implicitly clears the high
6461 * bits in 64-bit mode. The REX.W is first necessary when VLMAX > 256
6462 * and opcode modifications are made to work with the whole width (not
6463 * just 128). */
6464 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
6465 {
6466 case IEM_OP_PRF_SIZE_OP: /* SSE */
6467 IEMOP_MNEMONIC("pmovmskb Gd,Nq");
6468 IEMOP_HLP_DECODED_NL_2(OP_PMOVMSKB, IEMOPFORM_RM_REG, OP_PARM_Gd, OP_PARM_Vdq, DISOPTYPE_SSE | DISOPTYPE_HARMLESS);
6469 IEM_MC_BEGIN(2, 0);
6470 IEM_MC_ARG(uint64_t *, pDst, 0);
6471 IEM_MC_ARG(uint128_t const *, pSrc, 1);
6472 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
6473 IEM_MC_REF_GREG_U64(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6474 IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6475 IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pmovmskb_u128, pDst, pSrc);
6476 IEM_MC_ADVANCE_RIP();
6477 IEM_MC_END();
6478 return VINF_SUCCESS;
6479
6480 case 0: /* MMX */
6481 IEMOP_MNEMONIC("pmovmskb Gd,Udq");
6482 IEMOP_HLP_DECODED_NL_2(OP_PMOVMSKB, IEMOPFORM_RM_REG, OP_PARM_Gd, OP_PARM_Vdq, DISOPTYPE_MMX | DISOPTYPE_HARMLESS);
6483 IEM_MC_BEGIN(2, 0);
6484 IEM_MC_ARG(uint64_t *, pDst, 0);
6485 IEM_MC_ARG(uint64_t const *, pSrc, 1);
6486 IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT();
6487 IEM_MC_REF_GREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
6488 IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
6489 IEM_MC_CALL_MMX_AIMPL_2(iemAImpl_pmovmskb_u64, pDst, pSrc);
6490 IEM_MC_ADVANCE_RIP();
6491 IEM_MC_END();
6492 return VINF_SUCCESS;
6493
6494 default:
6495 return IEMOP_RAISE_INVALID_OPCODE();
6496 }
6497}
6498
6499
/* Opcodes 0x0f 0xd8..0xee: FNIEMOP_STUB placeholders, decoding not yet
   implemented. */
/** Opcode 0x0f 0xd8. */
FNIEMOP_STUB(iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq);
/** Opcode 0x0f 0xd9. */
FNIEMOP_STUB(iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq);
/** Opcode 0x0f 0xda. */
FNIEMOP_STUB(iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq);
/** Opcode 0x0f 0xdb. */
FNIEMOP_STUB(iemOp_pand_Pq_Qq__pand_Vdq_Wdq);
/** Opcode 0x0f 0xdc. */
FNIEMOP_STUB(iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq);
/** Opcode 0x0f 0xdd. */
FNIEMOP_STUB(iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq);
/** Opcode 0x0f 0xde. */
FNIEMOP_STUB(iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq);
/** Opcode 0x0f 0xdf. */
FNIEMOP_STUB(iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq);
/** Opcode 0x0f 0xe0. */
FNIEMOP_STUB(iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq);
/** Opcode 0x0f 0xe1. */
FNIEMOP_STUB(iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq);
/** Opcode 0x0f 0xe2. */
FNIEMOP_STUB(iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq);
/** Opcode 0x0f 0xe3. */
FNIEMOP_STUB(iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq);
/** Opcode 0x0f 0xe4. */
FNIEMOP_STUB(iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq);
/** Opcode 0x0f 0xe5. */
FNIEMOP_STUB(iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq);
/** Opcode 0x0f 0xe6. */
FNIEMOP_STUB(iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd);
/** Opcode 0x0f 0xe7. */
FNIEMOP_STUB(iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq);
/** Opcode 0x0f 0xe8. */
FNIEMOP_STUB(iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq);
/** Opcode 0x0f 0xe9. */
FNIEMOP_STUB(iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq);
/** Opcode 0x0f 0xea. */
FNIEMOP_STUB(iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq);
/** Opcode 0x0f 0xeb. */
FNIEMOP_STUB(iemOp_por_Pq_Qq__por_Vdq_Wdq);
/** Opcode 0x0f 0xec. */
FNIEMOP_STUB(iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq);
/** Opcode 0x0f 0xed. */
FNIEMOP_STUB(iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq);
/** Opcode 0x0f 0xee. */
FNIEMOP_STUB(iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq);
6546
6547
/** Opcode 0x0f 0xef. */
FNIEMOP_DEF(iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq)
{
    /* PXOR: handled by the common MMX/SSE2 full-width binary-op decoder with
       the pxor assembly worker table. */
    IEMOP_MNEMONIC("pxor");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pxor);
}
6554
6555
/* Opcodes 0x0f 0xf0..0xfe: FNIEMOP_STUB placeholders, decoding not yet
   implemented. */
/** Opcode 0x0f 0xf0. */
FNIEMOP_STUB(iemOp_lddqu_Vdq_Mdq);
/** Opcode 0x0f 0xf1. */
FNIEMOP_STUB(iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq);
/** Opcode 0x0f 0xf2. */
FNIEMOP_STUB(iemOp_psld_Pq_Qq__pslld_Vdq_Wdq);
/** Opcode 0x0f 0xf3. */
FNIEMOP_STUB(iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq);
/** Opcode 0x0f 0xf4. */
FNIEMOP_STUB(iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq);
/** Opcode 0x0f 0xf5. */
FNIEMOP_STUB(iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq);
/** Opcode 0x0f 0xf6. */
FNIEMOP_STUB(iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq);
/** Opcode 0x0f 0xf7. */
FNIEMOP_STUB(iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq);
/** Opcode 0x0f 0xf8. */
FNIEMOP_STUB(iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq); //NEXT
/** Opcode 0x0f 0xf9. */
FNIEMOP_STUB(iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq);
/** Opcode 0x0f 0xfa. */
FNIEMOP_STUB(iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq);
/** Opcode 0x0f 0xfb. */
FNIEMOP_STUB(iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq);
/** Opcode 0x0f 0xfc. */
FNIEMOP_STUB(iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq);
/** Opcode 0x0f 0xfd. */
FNIEMOP_STUB(iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq);
/** Opcode 0x0f 0xfe. */
FNIEMOP_STUB(iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq);
6586
6587
/* Dispatch table for the two-byte (0x0f-prefixed) opcode space; indexed by
   the second opcode byte.  Fixed the index comment on the 0xbb entry, which
   was mislabelled "0xbd" (the function, iemOp_btc_Ev_Gv, is correct). */
const PFNIEMOP g_apfnTwoByteMap[256] =
{
    /* 0x00 */ iemOp_Grp6,
    /* 0x01 */ iemOp_Grp7,
    /* 0x02 */ iemOp_lar_Gv_Ew,
    /* 0x03 */ iemOp_lsl_Gv_Ew,
    /* 0x04 */ iemOp_Invalid,
    /* 0x05 */ iemOp_syscall,
    /* 0x06 */ iemOp_clts,
    /* 0x07 */ iemOp_sysret,
    /* 0x08 */ iemOp_invd,
    /* 0x09 */ iemOp_wbinvd,
    /* 0x0a */ iemOp_Invalid,
    /* 0x0b */ iemOp_ud2,
    /* 0x0c */ iemOp_Invalid,
    /* 0x0d */ iemOp_nop_Ev_GrpP,
    /* 0x0e */ iemOp_femms,
    /* 0x0f */ iemOp_3Dnow,
    /* 0x10 */ iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd,
    /* 0x11 */ iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd,
    /* 0x12 */ iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq,
    /* 0x13 */ iemOp_movlps_Mq_Vq__movlpd_Mq_Vq,
    /* 0x14 */ iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq,
    /* 0x15 */ iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq,
    /* 0x16 */ iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq,
    /* 0x17 */ iemOp_movhps_Mq_Vq__movhpd_Mq_Vq,
    /* 0x18 */ iemOp_prefetch_Grp16,
    /* 0x19 */ iemOp_nop_Ev,
    /* 0x1a */ iemOp_nop_Ev,
    /* 0x1b */ iemOp_nop_Ev,
    /* 0x1c */ iemOp_nop_Ev,
    /* 0x1d */ iemOp_nop_Ev,
    /* 0x1e */ iemOp_nop_Ev,
    /* 0x1f */ iemOp_nop_Ev,
    /* 0x20 */ iemOp_mov_Rd_Cd,
    /* 0x21 */ iemOp_mov_Rd_Dd,
    /* 0x22 */ iemOp_mov_Cd_Rd,
    /* 0x23 */ iemOp_mov_Dd_Rd,
    /* 0x24 */ iemOp_mov_Rd_Td,
    /* 0x25 */ iemOp_Invalid,
    /* 0x26 */ iemOp_mov_Td_Rd,
    /* 0x27 */ iemOp_Invalid,
    /* 0x28 */ iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd,
    /* 0x29 */ iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd,
    /* 0x2a */ iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey,
    /* 0x2b */ iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd,
    /* 0x2c */ iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd,
    /* 0x2d */ iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd,
    /* 0x2e */ iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd,
    /* 0x2f */ iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd,
    /* 0x30 */ iemOp_wrmsr,
    /* 0x31 */ iemOp_rdtsc,
    /* 0x32 */ iemOp_rdmsr,
    /* 0x33 */ iemOp_rdpmc,
    /* 0x34 */ iemOp_sysenter,
    /* 0x35 */ iemOp_sysexit,
    /* 0x36 */ iemOp_Invalid,
    /* 0x37 */ iemOp_getsec,
    /* 0x38 */ iemOp_3byte_Esc_A4,
    /* 0x39 */ iemOp_Invalid,
    /* 0x3a */ iemOp_3byte_Esc_A5,
    /* 0x3b */ iemOp_Invalid,
    /* 0x3c */ iemOp_movnti_Gv_Ev/*??*/,
    /* 0x3d */ iemOp_Invalid,
    /* 0x3e */ iemOp_Invalid,
    /* 0x3f */ iemOp_Invalid,
    /* 0x40 */ iemOp_cmovo_Gv_Ev,
    /* 0x41 */ iemOp_cmovno_Gv_Ev,
    /* 0x42 */ iemOp_cmovc_Gv_Ev,
    /* 0x43 */ iemOp_cmovnc_Gv_Ev,
    /* 0x44 */ iemOp_cmove_Gv_Ev,
    /* 0x45 */ iemOp_cmovne_Gv_Ev,
    /* 0x46 */ iemOp_cmovbe_Gv_Ev,
    /* 0x47 */ iemOp_cmovnbe_Gv_Ev,
    /* 0x48 */ iemOp_cmovs_Gv_Ev,
    /* 0x49 */ iemOp_cmovns_Gv_Ev,
    /* 0x4a */ iemOp_cmovp_Gv_Ev,
    /* 0x4b */ iemOp_cmovnp_Gv_Ev,
    /* 0x4c */ iemOp_cmovl_Gv_Ev,
    /* 0x4d */ iemOp_cmovnl_Gv_Ev,
    /* 0x4e */ iemOp_cmovle_Gv_Ev,
    /* 0x4f */ iemOp_cmovnle_Gv_Ev,
    /* 0x50 */ iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd,
    /* 0x51 */ iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd,
    /* 0x52 */ iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss,
    /* 0x53 */ iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss,
    /* 0x54 */ iemOp_andps_Vps_Wps__andpd_Wpd_Vpd,
    /* 0x55 */ iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd,
    /* 0x56 */ iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd,
    /* 0x57 */ iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd,
    /* 0x58 */ iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd,
    /* 0x59 */ iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd,
    /* 0x5a */ iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd,
    /* 0x5b */ iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps,
    /* 0x5c */ iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd,
    /* 0x5d */ iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd,
    /* 0x5e */ iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd,
    /* 0x5f */ iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd,
    /* 0x60 */ iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq,
    /* 0x61 */ iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq,
    /* 0x62 */ iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq,
    /* 0x63 */ iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq,
    /* 0x64 */ iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq,
    /* 0x65 */ iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq,
    /* 0x66 */ iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq,
    /* 0x67 */ iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq,
    /* 0x68 */ iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq,
    /* 0x69 */ iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq,
    /* 0x6a */ iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq,
    /* 0x6b */ iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq,
    /* 0x6c */ iemOp_punpcklqdq_Vdq_Wdq,
    /* 0x6d */ iemOp_punpckhqdq_Vdq_Wdq,
    /* 0x6e */ iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey,
    /* 0x6f */ iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq,
    /* 0x70 */ iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib,
    /* 0x71 */ iemOp_Grp12,
    /* 0x72 */ iemOp_Grp13,
    /* 0x73 */ iemOp_Grp14,
    /* 0x74 */ iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq,
    /* 0x75 */ iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq,
    /* 0x76 */ iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq,
    /* 0x77 */ iemOp_emms,
    /* 0x78 */ iemOp_vmread_AmdGrp17,
    /* 0x79 */ iemOp_vmwrite,
    /* 0x7a */ iemOp_Invalid,
    /* 0x7b */ iemOp_Invalid,
    /* 0x7c */ iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps,
    /* 0x7d */ iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps,
    /* 0x7e */ iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq,
    /* 0x7f */ iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq,
    /* 0x80 */ iemOp_jo_Jv,
    /* 0x81 */ iemOp_jno_Jv,
    /* 0x82 */ iemOp_jc_Jv,
    /* 0x83 */ iemOp_jnc_Jv,
    /* 0x84 */ iemOp_je_Jv,
    /* 0x85 */ iemOp_jne_Jv,
    /* 0x86 */ iemOp_jbe_Jv,
    /* 0x87 */ iemOp_jnbe_Jv,
    /* 0x88 */ iemOp_js_Jv,
    /* 0x89 */ iemOp_jns_Jv,
    /* 0x8a */ iemOp_jp_Jv,
    /* 0x8b */ iemOp_jnp_Jv,
    /* 0x8c */ iemOp_jl_Jv,
    /* 0x8d */ iemOp_jnl_Jv,
    /* 0x8e */ iemOp_jle_Jv,
    /* 0x8f */ iemOp_jnle_Jv,
    /* 0x90 */ iemOp_seto_Eb,
    /* 0x91 */ iemOp_setno_Eb,
    /* 0x92 */ iemOp_setc_Eb,
    /* 0x93 */ iemOp_setnc_Eb,
    /* 0x94 */ iemOp_sete_Eb,
    /* 0x95 */ iemOp_setne_Eb,
    /* 0x96 */ iemOp_setbe_Eb,
    /* 0x97 */ iemOp_setnbe_Eb,
    /* 0x98 */ iemOp_sets_Eb,
    /* 0x99 */ iemOp_setns_Eb,
    /* 0x9a */ iemOp_setp_Eb,
    /* 0x9b */ iemOp_setnp_Eb,
    /* 0x9c */ iemOp_setl_Eb,
    /* 0x9d */ iemOp_setnl_Eb,
    /* 0x9e */ iemOp_setle_Eb,
    /* 0x9f */ iemOp_setnle_Eb,
    /* 0xa0 */ iemOp_push_fs,
    /* 0xa1 */ iemOp_pop_fs,
    /* 0xa2 */ iemOp_cpuid,
    /* 0xa3 */ iemOp_bt_Ev_Gv,
    /* 0xa4 */ iemOp_shld_Ev_Gv_Ib,
    /* 0xa5 */ iemOp_shld_Ev_Gv_CL,
    /* 0xa6 */ iemOp_Invalid,
    /* 0xa7 */ iemOp_Invalid,
    /* 0xa8 */ iemOp_push_gs,
    /* 0xa9 */ iemOp_pop_gs,
    /* 0xaa */ iemOp_rsm,
    /* 0xab */ iemOp_bts_Ev_Gv,
    /* 0xac */ iemOp_shrd_Ev_Gv_Ib,
    /* 0xad */ iemOp_shrd_Ev_Gv_CL,
    /* 0xae */ iemOp_Grp15,
    /* 0xaf */ iemOp_imul_Gv_Ev,
    /* 0xb0 */ iemOp_cmpxchg_Eb_Gb,
    /* 0xb1 */ iemOp_cmpxchg_Ev_Gv,
    /* 0xb2 */ iemOp_lss_Gv_Mp,
    /* 0xb3 */ iemOp_btr_Ev_Gv,
    /* 0xb4 */ iemOp_lfs_Gv_Mp,
    /* 0xb5 */ iemOp_lgs_Gv_Mp,
    /* 0xb6 */ iemOp_movzx_Gv_Eb,
    /* 0xb7 */ iemOp_movzx_Gv_Ew,
    /* 0xb8 */ iemOp_popcnt_Gv_Ev_jmpe,
    /* 0xb9 */ iemOp_Grp10,
    /* 0xba */ iemOp_Grp8,
    /* 0xbb */ iemOp_btc_Ev_Gv,
    /* 0xbc */ iemOp_bsf_Gv_Ev,
    /* 0xbd */ iemOp_bsr_Gv_Ev,
    /* 0xbe */ iemOp_movsx_Gv_Eb,
    /* 0xbf */ iemOp_movsx_Gv_Ew,
    /* 0xc0 */ iemOp_xadd_Eb_Gb,
    /* 0xc1 */ iemOp_xadd_Ev_Gv,
    /* 0xc2 */ iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib,
    /* 0xc3 */ iemOp_movnti_My_Gy,
    /* 0xc4 */ iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib,
    /* 0xc5 */ iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib,
    /* 0xc6 */ iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib,
    /* 0xc7 */ iemOp_Grp9,
    /* 0xc8 */ iemOp_bswap_rAX_r8,
    /* 0xc9 */ iemOp_bswap_rCX_r9,
    /* 0xca */ iemOp_bswap_rDX_r10,
    /* 0xcb */ iemOp_bswap_rBX_r11,
    /* 0xcc */ iemOp_bswap_rSP_r12,
    /* 0xcd */ iemOp_bswap_rBP_r13,
    /* 0xce */ iemOp_bswap_rSI_r14,
    /* 0xcf */ iemOp_bswap_rDI_r15,
    /* 0xd0 */ iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps,
    /* 0xd1 */ iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq,
    /* 0xd2 */ iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq,
    /* 0xd3 */ iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq,
    /* 0xd4 */ iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq,
    /* 0xd5 */ iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq,
    /* 0xd6 */ iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq,
    /* 0xd7 */ iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq,
    /* 0xd8 */ iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq,
    /* 0xd9 */ iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq,
    /* 0xda */ iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq,
    /* 0xdb */ iemOp_pand_Pq_Qq__pand_Vdq_Wdq,
    /* 0xdc */ iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq,
    /* 0xdd */ iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq,
    /* 0xde */ iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq,
    /* 0xdf */ iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq,
    /* 0xe0 */ iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq,
    /* 0xe1 */ iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq,
    /* 0xe2 */ iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq,
    /* 0xe3 */ iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq,
    /* 0xe4 */ iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq,
    /* 0xe5 */ iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq,
    /* 0xe6 */ iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd,
    /* 0xe7 */ iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq,
    /* 0xe8 */ iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq,
    /* 0xe9 */ iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq,
    /* 0xea */ iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq,
    /* 0xeb */ iemOp_por_Pq_Qq__por_Vdq_Wdq,
    /* 0xec */ iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq,
    /* 0xed */ iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq,
    /* 0xee */ iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq,
    /* 0xef */ iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq,
    /* 0xf0 */ iemOp_lddqu_Vdq_Mdq,
    /* 0xf1 */ iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq,
    /* 0xf2 */ iemOp_psld_Pq_Qq__pslld_Vdq_Wdq,
    /* 0xf3 */ iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq,
    /* 0xf4 */ iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq,
    /* 0xf5 */ iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq,
    /* 0xf6 */ iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq,
    /* 0xf7 */ iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq,
    /* 0xf8 */ iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq,
    /* 0xf9 */ iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq,
    /* 0xfa */ iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq,
    /* 0xfb */ iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq,
    /* 0xfc */ iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq,
    /* 0xfd */ iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq,
    /* 0xfe */ iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq,
    /* 0xff */ iemOp_Invalid
};
6847
6848/** @} */
6849
6850
6851/** @name One byte opcodes.
6852 *
6853 * @{
6854 */
6855
/** Opcode 0x00. 'add Eb,Gb' - byte ADD with reg/mem destination; defers to the common rm,r8 worker with the ADD implementation table. */
FNIEMOP_DEF(iemOp_add_Eb_Gb)
{
    IEMOP_MNEMONIC("add Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_add);
}
6862
6863
/** Opcode 0x01. 'add Ev,Gv' - word/dword/qword ADD with reg/mem destination. */
FNIEMOP_DEF(iemOp_add_Ev_Gv)
{
    IEMOP_MNEMONIC("add Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_add);
}
6870
6871
/** Opcode 0x02. 'add Gb,Eb' - byte ADD with register destination. */
FNIEMOP_DEF(iemOp_add_Gb_Eb)
{
    IEMOP_MNEMONIC("add Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_add);
}
6878
6879
/** Opcode 0x03. 'add Gv,Ev' - word/dword/qword ADD with register destination. */
FNIEMOP_DEF(iemOp_add_Gv_Ev)
{
    IEMOP_MNEMONIC("add Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_add);
}
6886
6887
/** Opcode 0x04. 'add al,Ib' - ADD immediate byte into AL. */
FNIEMOP_DEF(iemOp_add_Al_Ib)
{
    IEMOP_MNEMONIC("add al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_add);
}
6894
6895
/** Opcode 0x05. 'add rAX,Iz' - ADD immediate (word/dword, sign-extended for 64-bit) into rAX. */
FNIEMOP_DEF(iemOp_add_eAX_Iz)
{
    IEMOP_MNEMONIC("add rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_add);
}
6902
6903
/** Opcode 0x06. 'push es' - push the ES segment register via the common sreg-push worker. */
FNIEMOP_DEF(iemOp_push_ES)
{
    IEMOP_MNEMONIC("push es");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_ES);
}
6910
6911
/** Opcode 0x07. 'pop es' - invalid in 64-bit mode; defers to the C implementation since a segment load can fault/trap. */
FNIEMOP_DEF(iemOp_pop_ES)
{
    IEMOP_MNEMONIC("pop es");
    IEMOP_HLP_NO_64BIT();
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_ES, pIemCpu->enmEffOpSize);
}
6920
6921
/** Opcode 0x08. 'or Eb,Gb' - byte OR with reg/mem destination; AF is architecturally undefined after OR. */
FNIEMOP_DEF(iemOp_or_Eb_Gb)
{
    IEMOP_MNEMONIC("or  Eb,Gb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_or);
}
6929
6930
6931/** Opcode 0x09. */
6932FNIEMOP_DEF(iemOp_or_Ev_Gv)
6933{
6934 IEMOP_MNEMONIC("or Ev,Gv ");
6935 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
6936 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_or);
6937}
6938
6939
/** Opcode 0x0a. 'or Gb,Eb' - byte OR with register destination; AF undefined. */
FNIEMOP_DEF(iemOp_or_Gb_Eb)
{
    IEMOP_MNEMONIC("or  Gb,Eb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_or);
}
6947
6948
/** Opcode 0x0b. 'or Gv,Ev' - word/dword/qword OR with register destination; AF undefined. */
FNIEMOP_DEF(iemOp_or_Gv_Ev)
{
    IEMOP_MNEMONIC("or  Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_or);
}
6956
6957
/** Opcode 0x0c. 'or al,Ib' - OR immediate byte into AL; AF undefined. */
FNIEMOP_DEF(iemOp_or_Al_Ib)
{
    IEMOP_MNEMONIC("or  al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_or);
}
6965
6966
/** Opcode 0x0d. 'or rAX,Iz' - OR immediate into rAX; AF undefined. */
FNIEMOP_DEF(iemOp_or_eAX_Iz)
{
    IEMOP_MNEMONIC("or  rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_or);
}
6974
6975
/** Opcode 0x0e. 'push cs' - push the CS segment register. */
FNIEMOP_DEF(iemOp_push_CS)
{
    IEMOP_MNEMONIC("push cs");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_CS);
}
6982
6983
/** Opcode 0x0f. Two-byte opcode escape - fetches the second opcode byte and dispatches via the two-byte map. */
FNIEMOP_DEF(iemOp_2byteEscape)
{
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnTwoByteMap[b]);
}
6990
/** Opcode 0x10. 'adc Eb,Gb' - byte add-with-carry, reg/mem destination. */
FNIEMOP_DEF(iemOp_adc_Eb_Gb)
{
    IEMOP_MNEMONIC("adc Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_adc);
}
6997
6998
/** Opcode 0x11. 'adc Ev,Gv' - word/dword/qword add-with-carry, reg/mem destination. */
FNIEMOP_DEF(iemOp_adc_Ev_Gv)
{
    IEMOP_MNEMONIC("adc Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_adc);
}
7005
7006
/** Opcode 0x12. 'adc Gb,Eb' - byte add-with-carry, register destination. */
FNIEMOP_DEF(iemOp_adc_Gb_Eb)
{
    IEMOP_MNEMONIC("adc Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_adc);
}
7013
7014
/** Opcode 0x13. 'adc Gv,Ev' - word/dword/qword add-with-carry, register destination. */
FNIEMOP_DEF(iemOp_adc_Gv_Ev)
{
    IEMOP_MNEMONIC("adc Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_adc);
}
7021
7022
/** Opcode 0x14. 'adc al,Ib' - add-with-carry immediate byte into AL. */
FNIEMOP_DEF(iemOp_adc_Al_Ib)
{
    IEMOP_MNEMONIC("adc al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_adc);
}
7029
7030
/** Opcode 0x15. 'adc rAX,Iz' - add-with-carry immediate into rAX. */
FNIEMOP_DEF(iemOp_adc_eAX_Iz)
{
    IEMOP_MNEMONIC("adc rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_adc);
}
7037
7038
/** Opcode 0x16. 'push ss' - push the SS segment register. */
FNIEMOP_DEF(iemOp_push_SS)
{
    IEMOP_MNEMONIC("push ss");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_SS);
}
7045
7046
/** Opcode 0x17. 'pop ss' - invalid in 64-bit mode; deferred to the C implementation. */
FNIEMOP_DEF(iemOp_pop_SS)
{
    IEMOP_MNEMONIC("pop ss"); /** @todo implies instruction fusing? */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_SS, pIemCpu->enmEffOpSize);
}
7055
7056
/** Opcode 0x18. 'sbb Eb,Gb' - byte subtract-with-borrow, reg/mem destination. */
FNIEMOP_DEF(iemOp_sbb_Eb_Gb)
{
    IEMOP_MNEMONIC("sbb Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sbb);
}
7063
7064
/** Opcode 0x19. 'sbb Ev,Gv' - word/dword/qword subtract-with-borrow, reg/mem destination. */
FNIEMOP_DEF(iemOp_sbb_Ev_Gv)
{
    IEMOP_MNEMONIC("sbb Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sbb);
}
7071
7072
/** Opcode 0x1a. 'sbb Gb,Eb' - byte subtract-with-borrow, register destination. */
FNIEMOP_DEF(iemOp_sbb_Gb_Eb)
{
    IEMOP_MNEMONIC("sbb Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sbb);
}
7079
7080
/** Opcode 0x1b. 'sbb Gv,Ev' - word/dword/qword subtract-with-borrow, register destination. */
FNIEMOP_DEF(iemOp_sbb_Gv_Ev)
{
    IEMOP_MNEMONIC("sbb Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sbb);
}
7087
7088
/** Opcode 0x1c. 'sbb al,Ib' - subtract-with-borrow immediate byte from AL. */
FNIEMOP_DEF(iemOp_sbb_Al_Ib)
{
    IEMOP_MNEMONIC("sbb al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sbb);
}
7095
7096
/** Opcode 0x1d. 'sbb rAX,Iz' - subtract-with-borrow immediate from rAX. */
FNIEMOP_DEF(iemOp_sbb_eAX_Iz)
{
    IEMOP_MNEMONIC("sbb rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sbb);
}
7103
7104
/** Opcode 0x1e. 'push ds' - push the DS segment register. */
FNIEMOP_DEF(iemOp_push_DS)
{
    IEMOP_MNEMONIC("push ds");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_DS);
}
7111
7112
/** Opcode 0x1f. 'pop ds' - invalid in 64-bit mode; deferred to the C implementation. */
FNIEMOP_DEF(iemOp_pop_DS)
{
    IEMOP_MNEMONIC("pop ds");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_DS, pIemCpu->enmEffOpSize);
}
7121
7122
/** Opcode 0x20. 'and Eb,Gb' - byte AND with reg/mem destination; AF undefined. */
FNIEMOP_DEF(iemOp_and_Eb_Gb)
{
    IEMOP_MNEMONIC("and Eb,Gb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_and);
}
7130
7131
/** Opcode 0x21. 'and Ev,Gv' - word/dword/qword AND with reg/mem destination; AF undefined. */
FNIEMOP_DEF(iemOp_and_Ev_Gv)
{
    IEMOP_MNEMONIC("and Ev,Gv");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_and);
}
7139
7140
/** Opcode 0x22. 'and Gb,Eb' - byte AND with register destination; AF undefined. */
FNIEMOP_DEF(iemOp_and_Gb_Eb)
{
    IEMOP_MNEMONIC("and Gb,Eb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_and);
}
7148
7149
/** Opcode 0x23. 'and Gv,Ev' - word/dword/qword AND with register destination; AF undefined. */
FNIEMOP_DEF(iemOp_and_Gv_Ev)
{
    IEMOP_MNEMONIC("and Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_and);
}
7157
7158
/** Opcode 0x24. 'and al,Ib' - AND immediate byte into AL; AF undefined. */
FNIEMOP_DEF(iemOp_and_Al_Ib)
{
    IEMOP_MNEMONIC("and al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_and);
}
7166
7167
/** Opcode 0x25. 'and rAX,Iz' - AND immediate into rAX; AF undefined. */
FNIEMOP_DEF(iemOp_and_eAX_Iz)
{
    IEMOP_MNEMONIC("and rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_and);
}
7175
7176
/** Opcode 0x26. ES segment-override prefix - records the prefix, sets the
 *  effective segment for memory operands, and continues decoding. */
FNIEMOP_DEF(iemOp_seg_ES)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg es");
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_ES;
    pIemCpu->iEffSeg    = X86_SREG_ES;

    /* Fetch and dispatch the instruction byte following the prefix. */
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
7187
7188
/** Opcode 0x27. 'daa' - decimal adjust AL after addition; not implemented yet (stub raises/asserts per FNIEMOP_STUB). */
FNIEMOP_STUB(iemOp_daa);
7191
7192
/** Opcode 0x28. 'sub Eb,Gb' - byte SUB with reg/mem destination. */
FNIEMOP_DEF(iemOp_sub_Eb_Gb)
{
    IEMOP_MNEMONIC("sub Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sub);
}
7199
7200
/** Opcode 0x29. 'sub Ev,Gv' - word/dword/qword SUB with reg/mem destination. */
FNIEMOP_DEF(iemOp_sub_Ev_Gv)
{
    IEMOP_MNEMONIC("sub Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sub);
}
7207
7208
/** Opcode 0x2a. 'sub Gb,Eb' - byte SUB with register destination. */
FNIEMOP_DEF(iemOp_sub_Gb_Eb)
{
    IEMOP_MNEMONIC("sub Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sub);
}
7215
7216
/** Opcode 0x2b. 'sub Gv,Ev' - word/dword/qword SUB with register destination. */
FNIEMOP_DEF(iemOp_sub_Gv_Ev)
{
    IEMOP_MNEMONIC("sub Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sub);
}
7223
7224
/** Opcode 0x2c. 'sub al,Ib' - SUB immediate byte from AL. */
FNIEMOP_DEF(iemOp_sub_Al_Ib)
{
    IEMOP_MNEMONIC("sub al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sub);
}
7231
7232
/** Opcode 0x2d. 'sub rAX,Iz' - SUB immediate from rAX. */
FNIEMOP_DEF(iemOp_sub_eAX_Iz)
{
    IEMOP_MNEMONIC("sub rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sub);
}
7239
7240
/** Opcode 0x2e. CS segment-override prefix - records the prefix, sets the
 *  effective segment for memory operands, and continues decoding. */
FNIEMOP_DEF(iemOp_seg_CS)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg cs");
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_CS;
    pIemCpu->iEffSeg    = X86_SREG_CS;

    /* Fetch and dispatch the instruction byte following the prefix. */
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
7251
7252
/** Opcode 0x2f. 'das' - decimal adjust AL after subtraction; not implemented yet. */
FNIEMOP_STUB(iemOp_das);
7255
7256
/** Opcode 0x30. 'xor Eb,Gb' - byte XOR with reg/mem destination; AF undefined. */
FNIEMOP_DEF(iemOp_xor_Eb_Gb)
{
    IEMOP_MNEMONIC("xor Eb,Gb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_xor);
}
7264
7265
/** Opcode 0x31. 'xor Ev,Gv' - word/dword/qword XOR with reg/mem destination; AF undefined. */
FNIEMOP_DEF(iemOp_xor_Ev_Gv)
{
    IEMOP_MNEMONIC("xor Ev,Gv");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_xor);
}
7273
7274
/** Opcode 0x32. 'xor Gb,Eb' - byte XOR with register destination; AF undefined. */
FNIEMOP_DEF(iemOp_xor_Gb_Eb)
{
    IEMOP_MNEMONIC("xor Gb,Eb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_xor);
}
7282
7283
/** Opcode 0x33. 'xor Gv,Ev' - word/dword/qword XOR with register destination; AF undefined. */
FNIEMOP_DEF(iemOp_xor_Gv_Ev)
{
    IEMOP_MNEMONIC("xor Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_xor);
}
7291
7292
/** Opcode 0x34. 'xor al,Ib' - XOR immediate byte into AL; AF undefined. */
FNIEMOP_DEF(iemOp_xor_Al_Ib)
{
    IEMOP_MNEMONIC("xor al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_xor);
}
7300
7301
/** Opcode 0x35. 'xor rAX,Iz' - XOR immediate into rAX; AF undefined. */
FNIEMOP_DEF(iemOp_xor_eAX_Iz)
{
    IEMOP_MNEMONIC("xor rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_xor);
}
7309
7310
/** Opcode 0x36. SS segment-override prefix - records the prefix, sets the
 *  effective segment for memory operands, and continues decoding. */
FNIEMOP_DEF(iemOp_seg_SS)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg ss");
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_SS;
    pIemCpu->iEffSeg    = X86_SREG_SS;

    /* Fetch and dispatch the instruction byte following the prefix. */
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
7321
7322
/** Opcode 0x37. 'aaa' - ASCII adjust AL after addition; not implemented yet. */
FNIEMOP_STUB(iemOp_aaa);
7325
7326
/** Opcode 0x38. 'cmp Eb,Gb' - byte compare; write-less, so LOCK is rejected up front. */
FNIEMOP_DEF(iemOp_cmp_Eb_Gb)
{
    IEMOP_MNEMONIC("cmp Eb,Gb");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_cmp);
}
7334
7335
/** Opcode 0x39. 'cmp Ev,Gv' - word/dword/qword compare; write-less, so LOCK is rejected up front. */
FNIEMOP_DEF(iemOp_cmp_Ev_Gv)
{
    IEMOP_MNEMONIC("cmp Ev,Gv");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_cmp);
}
7343
7344
/** Opcode 0x3a. 'cmp Gb,Eb' - byte compare, register first operand. */
FNIEMOP_DEF(iemOp_cmp_Gb_Eb)
{
    IEMOP_MNEMONIC("cmp Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_cmp);
}
7351
7352
/** Opcode 0x3b. 'cmp Gv,Ev' - word/dword/qword compare, register first operand. */
FNIEMOP_DEF(iemOp_cmp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmp Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_cmp);
}
7359
7360
/** Opcode 0x3c. 'cmp al,Ib' - compare AL with immediate byte. */
FNIEMOP_DEF(iemOp_cmp_Al_Ib)
{
    IEMOP_MNEMONIC("cmp al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_cmp);
}
7367
7368
/** Opcode 0x3d. 'cmp rAX,Iz' - compare rAX with immediate. */
FNIEMOP_DEF(iemOp_cmp_eAX_Iz)
{
    IEMOP_MNEMONIC("cmp rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_cmp);
}
7375
7376
/** Opcode 0x3e. DS segment-override prefix - records the prefix, sets the
 *  effective segment for memory operands, and continues decoding. */
FNIEMOP_DEF(iemOp_seg_DS)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg ds");
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_DS;
    pIemCpu->iEffSeg    = X86_SREG_DS;

    /* Fetch and dispatch the instruction byte following the prefix. */
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
7387
7388
/** Opcode 0x3f. 'aas' - ASCII adjust AL after subtraction; not implemented yet. */
FNIEMOP_STUB(iemOp_aas);
7391
/**
 * Common worker for the register forms of 'inc/dec/not/neg': a unary
 * read-modify-write on the general register selected by iReg, sized by the
 * current effective operand size.
 *
 * @param   pImpl   The unary operation implementation table (per-size workers).
 * @param   iReg    General register index (X86_GREG_XXX).
 */
FNIEMOP_DEF_2(iemOpCommonUnaryGReg, PCIEMOPUNARYSIZES, pImpl, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_ARG(uint16_t *, pu16Dst, 0);
            IEM_MC_ARG(uint32_t *, pEFlags, 1);
            IEM_MC_REF_GREG_U16(pu16Dst, iReg);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_ARG(uint32_t *, pu32Dst, 0);
            IEM_MC_ARG(uint32_t *, pEFlags, 1);
            IEM_MC_REF_GREG_U32(pu32Dst, iReg);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
            /* 32-bit register writes clear the upper half of the 64-bit register. */
            IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_ARG(uint64_t *, pu64Dst, 0);
            IEM_MC_ARG(uint32_t *, pEFlags, 1);
            IEM_MC_REF_GREG_U64(pu64Dst, iReg);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
    }
    /* Not reachable: all IEMMODE values are handled above; keeps compilers quiet. */
    return VINF_SUCCESS;
}
7436
7437
/** Opcode 0x40. REX prefix (no bits) in 64-bit mode; 'inc eAX' elsewhere. */
FNIEMOP_DEF(iemOp_inc_eAX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX;

        /* Continue decoding the byte after the prefix. */
        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eAX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xAX);
}
7456
7457
/** Opcode 0x41. REX.B prefix in 64-bit mode; 'inc eCX' elsewhere. */
FNIEMOP_DEF(iemOp_inc_eCX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.b");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B;
        pIemCpu->uRexB      = 1 << 3;   /* extends the ModRM r/m / base register to r8-r15 */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eCX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xCX);
}
7477
7478
/** Opcode 0x42. REX.X prefix in 64-bit mode; 'inc eDX' elsewhere. */
FNIEMOP_DEF(iemOp_inc_eDX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.x");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X;
        pIemCpu->uRexIndex  = 1 << 3;   /* extends the SIB index register to r8-r15 */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eDX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDX);
}
7498
7499
7500
/** Opcode 0x43. REX.BX prefix in 64-bit mode; 'inc eBX' elsewhere. */
FNIEMOP_DEF(iemOp_inc_eBX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bx");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
        pIemCpu->uRexB      = 1 << 3;
        pIemCpu->uRexIndex  = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eBX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBX);
}
7521
7522
/** Opcode 0x44. REX.R prefix in 64-bit mode; 'inc eSP' elsewhere. */
FNIEMOP_DEF(iemOp_inc_eSP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.r");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R;
        pIemCpu->uRexReg    = 1 << 3;   /* extends the ModRM reg field to r8-r15 */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eSP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSP);
}
7542
7543
/** Opcode 0x45. REX.RB prefix in 64-bit mode; 'inc eBP' elsewhere. */
FNIEMOP_DEF(iemOp_inc_eBP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rb");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B;
        pIemCpu->uRexReg    = 1 << 3;
        pIemCpu->uRexB      = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eBP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBP);
}
7564
7565
/** Opcode 0x46. REX.RX prefix in 64-bit mode; 'inc eSI' elsewhere. */
FNIEMOP_DEF(iemOp_inc_eSI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rx");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X;
        pIemCpu->uRexReg    = 1 << 3;
        pIemCpu->uRexIndex  = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eSI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSI);
}
7586
7587
/** Opcode 0x47. REX.RBX prefix in 64-bit mode; 'inc eDI' elsewhere. */
FNIEMOP_DEF(iemOp_inc_eDI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbx");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
        pIemCpu->uRexReg    = 1 << 3;
        pIemCpu->uRexB      = 1 << 3;
        pIemCpu->uRexIndex  = 1 << 3;

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eDI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDI);
}
7609
7610
/** Opcode 0x48. REX.W prefix in 64-bit mode; 'dec eAX' elsewhere. */
FNIEMOP_DEF(iemOp_dec_eAX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.w");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_SIZE_REX_W;
        iemRecalEffOpSize(pIemCpu);     /* REX.W changes the effective operand size */

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eAX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xAX);
}
7630
7631
/** Opcode 0x49. REX.BW prefix in 64-bit mode; 'dec eCX' elsewhere. */
FNIEMOP_DEF(iemOp_dec_eCX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexB      = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eCX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xCX);
}
7652
7653
/** Opcode 0x4a. REX.XW prefix in 64-bit mode; 'dec eDX' elsewhere. */
FNIEMOP_DEF(iemOp_dec_eDX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.xw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexIndex  = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eDX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDX);
}
7674
7675
/** Opcode 0x4b. REX.BXW prefix in 64-bit mode; 'dec eBX' elsewhere. */
FNIEMOP_DEF(iemOp_dec_eBX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bxw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexB      = 1 << 3;
        pIemCpu->uRexIndex  = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eBX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBX);
}
7697
7698
/** Opcode 0x4c. REX.RW prefix in 64-bit mode; 'dec eSP' elsewhere. */
FNIEMOP_DEF(iemOp_dec_eSP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg    = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eSP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSP);
}
7719
7720
/** Opcode 0x4d. REX.RBW prefix in 64-bit mode; 'dec eBP' elsewhere. */
FNIEMOP_DEF(iemOp_dec_eBP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg    = 1 << 3;
        pIemCpu->uRexB      = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eBP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBP);
}
7742
7743
/** Opcode 0x4e. REX.RXW prefix in 64-bit mode; 'dec eSI' elsewhere. */
FNIEMOP_DEF(iemOp_dec_eSI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rxw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg    = 1 << 3;
        pIemCpu->uRexIndex  = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eSI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSI);
}
7765
7766
/** Opcode 0x4f. REX.RBXW prefix in 64-bit mode; 'dec eDI' elsewhere. */
FNIEMOP_DEF(iemOp_dec_eDI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbxw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg    = 1 << 3;
        pIemCpu->uRexB      = 1 << 3;
        pIemCpu->uRexIndex  = 1 << 3;
        iemRecalEffOpSize(pIemCpu);

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eDI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDI);
}
7789
7790
/**
 * Common 'push register' helper for opcodes 0x50-0x57.
 *
 * In 64-bit mode the register index is extended with REX.B, the default
 * operand size becomes 64-bit, and only 64/16-bit pushes exist (66h prefix
 * selects 16-bit; there is no 32-bit push in long mode).
 *
 * @param   iReg    General register index (X86_GREG_XXX) before REX extension.
 */
FNIEMOP_DEF_1(iemOpCommonPushGReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        iReg |= pIemCpu->uRexB;
        pIemCpu->enmDefOpSize = IEMMODE_64BIT;
        pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
    }

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint16_t, u16Value);
            IEM_MC_FETCH_GREG_U16(u16Value, iReg);
            IEM_MC_PUSH_U16(u16Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_GREG_U32(u32Value, iReg);
            IEM_MC_PUSH_U32(u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_GREG_U64(u64Value, iReg);
            IEM_MC_PUSH_U64(u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
    }

    return VINF_SUCCESS;
}
7836
7837
/** Opcode 0x50. 'push rAX' (rAX/r8 with REX.B). */
FNIEMOP_DEF(iemOp_push_eAX)
{
    IEMOP_MNEMONIC("push rAX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xAX);
}
7844
7845
/** Opcode 0x51. 'push rCX' (rCX/r9 with REX.B). */
FNIEMOP_DEF(iemOp_push_eCX)
{
    IEMOP_MNEMONIC("push rCX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xCX);
}
7852
7853
/** Opcode 0x52. 'push rDX' (rDX/r10 with REX.B). */
FNIEMOP_DEF(iemOp_push_eDX)
{
    IEMOP_MNEMONIC("push rDX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDX);
}
7860
7861
/** Opcode 0x53. 'push rBX' (rBX/r11 with REX.B). */
FNIEMOP_DEF(iemOp_push_eBX)
{
    IEMOP_MNEMONIC("push rBX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBX);
}
7868
7869
/** Opcode 0x54. 'push rSP' (rSP/r12 with REX.B). */
FNIEMOP_DEF(iemOp_push_eSP)
{
    IEMOP_MNEMONIC("push rSP");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSP);
}
7876
7877
/** Opcode 0x55. 'push rBP' (rBP/r13 with REX.B). */
FNIEMOP_DEF(iemOp_push_eBP)
{
    IEMOP_MNEMONIC("push rBP");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBP);
}
7884
7885
/** Opcode 0x56. 'push rSI' (rSI/r14 with REX.B). */
FNIEMOP_DEF(iemOp_push_eSI)
{
    IEMOP_MNEMONIC("push rSI");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSI);
}
7892
7893
/** Opcode 0x57. 'push rDI' (rDI/r15 with REX.B). */
FNIEMOP_DEF(iemOp_push_eDI)
{
    IEMOP_MNEMONIC("push rDI");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDI);
}
7900
7901
/**
 * Common 'pop register' helper for opcodes 0x58-0x5f.
 *
 * In 64-bit mode the register index is extended with REX.B and the default
 * operand size becomes 64-bit (66h prefix selects 16-bit).  The value is
 * popped directly through a reference to the destination register.
 *
 * @param   iReg    General register index (X86_GREG_XXX) before REX extension.
 */
FNIEMOP_DEF_1(iemOpCommonPopGReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        iReg |= pIemCpu->uRexB;
        pIemCpu->enmDefOpSize = IEMMODE_64BIT;
        pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
    }

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint16_t, *pu16Dst);
            IEM_MC_REF_GREG_U16(pu16Dst, iReg);
            IEM_MC_POP_U16(pu16Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, *pu32Dst);
            IEM_MC_REF_GREG_U32(pu32Dst, iReg);
            IEM_MC_POP_U32(pu32Dst);
            /* 32-bit register writes clear the upper half of the 64-bit register. */
            IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); /** @todo testcase*/
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, *pu64Dst);
            IEM_MC_REF_GREG_U64(pu64Dst, iReg);
            IEM_MC_POP_U64(pu64Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
    }

    return VINF_SUCCESS;
}
7948
7949
/** Opcode 0x58. 'pop rAX' (rAX/r8 with REX.B). */
FNIEMOP_DEF(iemOp_pop_eAX)
{
    IEMOP_MNEMONIC("pop rAX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xAX);
}
7956
7957
/** Opcode 0x59. 'pop rCX' (rCX/r9 with REX.B). */
FNIEMOP_DEF(iemOp_pop_eCX)
{
    IEMOP_MNEMONIC("pop rCX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xCX);
}
7964
7965
/** Opcode 0x5a. 'pop rDX' (rDX/r10 with REX.B). */
FNIEMOP_DEF(iemOp_pop_eDX)
{
    IEMOP_MNEMONIC("pop rDX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDX);
}
7972
7973
/** Opcode 0x5b. 'pop rBX' (rBX/r11 with REX.B). */
FNIEMOP_DEF(iemOp_pop_eBX)
{
    IEMOP_MNEMONIC("pop rBX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBX);
}
7980
7981
/** Opcode 0x5c. 'pop rSP' - special-cased: the popped value must not be
 *  written through a register reference while the stack pointer is being
 *  updated, so it is popped into a local first and then stored into xSP.
 *  With REX.B (64-bit mode) this is 'pop r12' and the common helper is used. */
FNIEMOP_DEF(iemOp_pop_eSP)
{
    IEMOP_MNEMONIC("pop rSP");
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        if (pIemCpu->uRexB)
            return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSP);
        pIemCpu->enmDefOpSize = IEMMODE_64BIT;
        pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
    }

    IEMOP_HLP_DECODED_NL_1(OP_POP, IEMOPFORM_FIXED, OP_PARM_REG_ESP,
                           DISOPTYPE_HARMLESS | DISOPTYPE_DEFAULT_64_OP_SIZE | DISOPTYPE_REXB_EXTENDS_OPREG);
    /** @todo add testcase for this instruction. */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint16_t, u16Dst);
            IEM_MC_POP_U16(&u16Dst); /** @todo not correct MC, fix later. */
            IEM_MC_STORE_GREG_U16(X86_GREG_xSP, u16Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Dst);
            IEM_MC_POP_U32(&u32Dst);
            IEM_MC_STORE_GREG_U32(X86_GREG_xSP, u32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Dst);
            IEM_MC_POP_U64(&u64Dst);
            IEM_MC_STORE_GREG_U64(X86_GREG_xSP, u64Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
    }

    return VINF_SUCCESS;
}
8029
8030
/** Opcode 0x5d - POP rBP. */
FNIEMOP_DEF(iemOp_pop_eBP)
{
    IEMOP_MNEMONIC("pop rBP");
    /* Defer to the common POP-GReg worker for rBP. */
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBP);
}
8037
8038
/** Opcode 0x5e - POP rSI. */
FNIEMOP_DEF(iemOp_pop_eSI)
{
    IEMOP_MNEMONIC("pop rSI");
    /* Defer to the common POP-GReg worker for rSI. */
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSI);
}
8045
8046
/** Opcode 0x5f - POP rDI. */
FNIEMOP_DEF(iemOp_pop_eDI)
{
    IEMOP_MNEMONIC("pop rDI");
    /* Defer to the common POP-GReg worker for rDI. */
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDI);
}
8053
8054
/** Opcode 0x60 - PUSHA/PUSHAD.
 * Invalid in 64-bit mode; dispatched to a C implementation by operand size.
 */
FNIEMOP_DEF(iemOp_pusha)
{
    IEMOP_MNEMONIC("pusha");
    IEMOP_HLP_NO_64BIT();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
        return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_16);
    /* Only 16- and 32-bit operand sizes are possible outside 64-bit mode. */
    Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_32);
}
8065
8066
/** Opcode 0x61 - POPA/POPAD.
 * Invalid in 64-bit mode; dispatched to a C implementation by operand size.
 */
FNIEMOP_DEF(iemOp_popa)
{
    IEMOP_MNEMONIC("popa");
    IEMOP_HLP_NO_64BIT();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
        return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_16);
    /* Only 16- and 32-bit operand sizes are possible outside 64-bit mode. */
    Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_32);
}
8077
8078
/** Opcode 0x62 - BOUND Gv,Ma. Not implemented yet (stub raises/asserts per FNIEMOP_STUB). */
FNIEMOP_STUB(iemOp_bound_Gv_Ma);
8081
8082
/** Opcode 0x63 - ARPL Ew,Gw (non-64-bit modes only; 0x63 is MOVSXD in long mode).
 * Adjusts the RPL field of the destination selector; invalid in real and V86 mode.
 */
FNIEMOP_DEF(iemOp_arpl_Ew_Gw)
{
    IEMOP_MNEMONIC("arpl Ew,Gw");
    IEMOP_HLP_NO_REAL_OR_V86_MODE();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: operate directly on the guest register and EFLAGS. */
        IEMOP_HLP_DECODED_NL_2(OP_ARPL, IEMOPFORM_MR_REG, OP_PARM_Ew, OP_PARM_Gw, DISOPTYPE_HARMLESS);
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint16_t *,      pu16Dst, 0);
        IEM_MC_ARG(uint16_t,        u16Src,  1);
        IEM_MC_ARG(uint32_t *,      pEFlags, 2);

        /* No REX extension here - ARPL only exists outside 64-bit mode. */
        IEM_MC_FETCH_GREG_U16(u16Src, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
        IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK));
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_arpl, pu16Dst, u16Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* Memory destination: map read-write, use a local EFLAGS copy and commit both. */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint16_t *, pu16Dst,          0);
        IEM_MC_ARG(uint16_t,   u16Src,           1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DECODED_NL_2(OP_ARPL, IEMOPFORM_MR_REG, OP_PARM_Ew, OP_PARM_Gw, DISOPTYPE_HARMLESS);
        IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U16(u16Src, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_arpl, pu16Dst, u16Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;

}
8131
8132
/** Opcode 0x63 - MOVSXD Gv,Ev (64-bit mode).
 * Sign-extends a 32-bit source into a 64-bit destination register.
 * @note This is a weird one. It works like a regular move instruction if
 *       REX.W isn't set, at least according to AMD docs (rev 3.15, 2009-11).
 * @todo This definitely needs a testcase to verify the odd cases. */
FNIEMOP_DEF(iemOp_movsxd_Gv_Ev)
{
    Assert(pIemCpu->enmEffOpSize == IEMMODE_64BIT); /* Caller branched already . */

    IEMOP_MNEMONIC("movsxd Gv,Ev");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /*
         * Register to register: fetch 32-bit source sign-extended to 64-bit.
         */
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(uint64_t, u64Value);
        IEM_MC_FETCH_GREG_U32_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're loading a register from memory: 32-bit load, sign-extended to 64-bit.
         */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint64_t, u64Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_FETCH_MEM_U32_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
8174
8175
/** Opcode 0x64 - FS segment override prefix.
 * Records the prefix, sets the effective segment, then decodes the next opcode byte.
 */
FNIEMOP_DEF(iemOp_seg_FS)
{
    /* A segment prefix cancels any REX prefix seen before it. */
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg fs");
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_FS;
    pIemCpu->iEffSeg    = X86_SREG_FS;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
8186
8187
/** Opcode 0x65 - GS segment override prefix.
 * Records the prefix, sets the effective segment, then decodes the next opcode byte.
 */
FNIEMOP_DEF(iemOp_seg_GS)
{
    /* A segment prefix cancels any REX prefix seen before it. */
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg gs");
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_GS;
    pIemCpu->iEffSeg    = X86_SREG_GS;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
8198
8199
/** Opcode 0x66 - operand-size override prefix.
 * Records the prefix, recalculates the effective operand size, then decodes
 * the next opcode byte.
 */
FNIEMOP_DEF(iemOp_op_size)
{
    /* An operand-size prefix cancels any REX prefix seen before it. */
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("op size");
    pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_OP;
    iemRecalEffOpSize(pIemCpu);

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
8210
8211
/** Opcode 0x67 - address-size override prefix.
 * Records the prefix, toggles the effective address mode relative to the
 * default mode, then decodes the next opcode byte.
 */
FNIEMOP_DEF(iemOp_addr_size)
{
    /* An address-size prefix cancels any REX prefix seen before it. */
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("addr size");
    pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_ADDR;
    switch (pIemCpu->enmDefAddrMode)
    {
        case IEMMODE_16BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
        case IEMMODE_32BIT: pIemCpu->enmEffAddrMode = IEMMODE_16BIT; break;
        case IEMMODE_64BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
        default: AssertFailed();
    }

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
8228
8229
/** Opcode 0x68 - PUSH Iz (word/dword immediate).
 * Defaults to 64-bit operand size in long mode; the 64-bit case pushes a
 * sign-extended 32-bit immediate.
 */
FNIEMOP_DEF(iemOp_push_Iz)
{
    IEMOP_MNEMONIC("push Iz");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U16(u16Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U32(u32Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            /* 32-bit immediate sign-extended to 64 bits. */
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U64(u64Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        /* NOTE(review): presumably expands to a default: label asserting unreachable - confirm macro. */
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
8273
8274
/** Opcode 0x69 - IMUL Gv,Ev,Iz (three-operand signed multiply).
 * Gv = Ev * Iz. The result is computed in a temporary and stored to Gv;
 * SF/ZF/AF/PF are declared undefined for the verifier.
 */
FNIEMOP_DEF(iemOp_imul_Gv_Ev_Iz)
{
    IEMOP_MNEMONIC("imul Gv,Ev,Iz"); /* Gv = Ev * Iz; */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *,      pu16Dst,           0);
                IEM_MC_ARG_CONST(uint16_t,  u16Src,/*=*/ u16Imm,1);
                IEM_MC_ARG(uint32_t *,      pEFlags,           2);
                IEM_MC_LOCAL(uint16_t,      u16Tmp);

                IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                /* Multiply in the local, then store it to the Gv register below. */
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *,      pu16Dst,    0);
                IEM_MC_ARG(uint16_t,        u16Src,     1);
                IEM_MC_ARG(uint32_t *,      pEFlags,    2);
                IEM_MC_LOCAL(uint16_t,      u16Tmp);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

                /* NOTE(review): trailing 2 presumably tells the addressing code a
                   2-byte immediate follows the ModR/M bytes - confirm macro contract. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_ASSIGN(u16Src, u16Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *,      pu32Dst,           0);
                IEM_MC_ARG_CONST(uint32_t,  u32Src,/*=*/ u32Imm,1);
                IEM_MC_ARG(uint32_t *,      pEFlags,           2);
                IEM_MC_LOCAL(uint32_t,      u32Tmp);

                IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *,      pu32Dst,    0);
                IEM_MC_ARG(uint32_t,        u32Src,     1);
                IEM_MC_ARG(uint32_t *,      pEFlags,    2);
                IEM_MC_LOCAL(uint32_t,      u32Tmp);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

                /* 4-byte immediate follows the ModR/M bytes. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_ASSIGN(u32Src, u32Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand; immediate is 32-bit sign-extended to 64-bit */
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *,      pu64Dst,           0);
                IEM_MC_ARG_CONST(uint64_t,  u64Src,/*=*/ u64Imm,1);
                IEM_MC_ARG(uint32_t *,      pEFlags,           2);
                IEM_MC_LOCAL(uint64_t,      u64Tmp);

                IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *,      pu64Dst,    0);
                IEM_MC_ARG(uint64_t,        u64Src,     1);
                IEM_MC_ARG(uint32_t *,      pEFlags,    2);
                IEM_MC_LOCAL(uint64_t,      u64Tmp);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

                /* 4-byte immediate (sign-extended) follows the ModR/M bytes. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_ASSIGN(u64Src, u64Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
        }
    }
    AssertFailedReturn(VERR_INTERNAL_ERROR_3);
}
8433
8434
/** Opcode 0x6a - PUSH Ib (sign-extended byte immediate).
 * Defaults to 64-bit operand size in long mode; the signed i8Imm is
 * implicitly sign-extended by the push of the wider type.
 */
FNIEMOP_DEF(iemOp_push_Ib)
{
    IEMOP_MNEMONIC("push Ib");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0,0);
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_PUSH_U16(i8Imm);
            break;
        case IEMMODE_32BIT:
            IEM_MC_PUSH_U32(i8Imm);
            break;
        case IEMMODE_64BIT:
            IEM_MC_PUSH_U64(i8Imm);
            break;
    }
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8460
8461
/** Opcode 0x6b - IMUL Gv,Ev,Ib (three-operand signed multiply, byte immediate).
 * Gv = Ev * Ib, with Ib sign-extended to the operand size.
 * SF/ZF/AF/PF are declared undefined for the verifier.
 */
FNIEMOP_DEF(iemOp_imul_Gv_Ev_Ib)
{
    IEMOP_MNEMONIC("imul Gv,Ev,Ib"); /* Gv = Ev * Ib; */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand; byte immediate sign-extended via (int8_t) cast */
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *,      pu16Dst,                    0);
                IEM_MC_ARG_CONST(uint16_t,  u16Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                    2);
                IEM_MC_LOCAL(uint16_t,      u16Tmp);

                IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                /* Multiply in the local, then store it to the Gv register below. */
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *,      pu16Dst,    0);
                IEM_MC_ARG(uint16_t,        u16Src,     1);
                IEM_MC_ARG(uint32_t *,      pEFlags,    2);
                IEM_MC_LOCAL(uint16_t,      u16Tmp);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

                /* 1-byte immediate follows the ModR/M bytes. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16Imm);
                IEM_MC_ASSIGN(u16Src, u16Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *,      pu32Dst,                    0);
                IEM_MC_ARG_CONST(uint32_t,  u32Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                    2);
                IEM_MC_LOCAL(uint32_t,      u32Tmp);

                IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *,      pu32Dst,    0);
                IEM_MC_ARG(uint32_t,        u32Src,     1);
                IEM_MC_ARG(uint32_t *,      pEFlags,    2);
                IEM_MC_LOCAL(uint32_t,      u32Tmp);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_S8_SX_U32(&u32Imm);
                IEM_MC_ASSIGN(u32Src, u32Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand */
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *,      pu64Dst,                    0);
                IEM_MC_ARG_CONST(uint64_t,  u64Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                    2);
                IEM_MC_LOCAL(uint64_t,      u64Tmp);

                IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *,      pu64Dst,    0);
                IEM_MC_ARG(uint64_t,        u64Src,     1);
                IEM_MC_ARG(uint32_t *,      pEFlags,    2);
                IEM_MC_LOCAL(uint64_t,      u64Tmp);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S8_SX_U64(&u64Imm);
                IEM_MC_ASSIGN(u64Src, u64Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
    }
    AssertFailedReturn(VERR_INTERNAL_ERROR_3);
}
8614
8615
/** Opcode 0x6c - INSB Yb,DX.
 * Dispatches to a C implementation selected by REP/REPNE prefix and effective
 * address size; the trailing 'false' argument is passed through to the worker
 * (meaning defined at the iemCImpl_*ins* implementations).
 */
FNIEMOP_DEF(iemOp_insb_Yb_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep ins Yb,DX");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op8_addr16, false);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op8_addr32, false);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op8_addr64, false);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("ins Yb,DX");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op8_addr16, false);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op8_addr32, false);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op8_addr64, false);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
8643
8644
/** Opcode 0x6d - INSW/INSD Yv,DX.
 * Dispatches to a C implementation selected by REP prefix, effective operand
 * size and effective address size. 64-bit operand size is handled as 32-bit
 * (the case labels fall through).
 */
FNIEMOP_DEF(iemOp_inswd_Yv_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
    {
        IEMOP_MNEMONIC("rep ins Yv,DX");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op16_addr16, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op16_addr32, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op16_addr64, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_64BIT: /* fallthrough - no 64-bit I/O; treated like 32-bit. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op32_addr16, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op32_addr32, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op32_addr64, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("ins Yv,DX");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op16_addr16, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op16_addr32, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op16_addr64, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_64BIT: /* fallthrough - no 64-bit I/O; treated like 32-bit. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op32_addr16, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op32_addr32, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op32_addr64, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
8704
8705
/** Opcode 0x6e - OUTSB DX,Yb.
 * Dispatches to a C implementation selected by REP/REPNE prefix and effective
 * address size; passes along the effective source segment. The trailing
 * 'false' argument is passed through to the worker.
 */
FNIEMOP_DEF(iemOp_outsb_Yb_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep out DX,Yb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr16, pIemCpu->iEffSeg, false);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr32, pIemCpu->iEffSeg, false);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr64, pIemCpu->iEffSeg, false);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("out DX,Yb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr16, pIemCpu->iEffSeg, false);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr32, pIemCpu->iEffSeg, false);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr64, pIemCpu->iEffSeg, false);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
8733
8734
/** Opcode 0x6f - OUTSW/OUTSD DX,Yv.
 * Dispatches to a C implementation selected by REP prefix, effective operand
 * size and effective address size; passes along the effective source segment.
 * 64-bit operand size is handled as 32-bit (the case labels fall through).
 */
FNIEMOP_DEF(iemOp_outswd_Yv_DX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
    {
        IEMOP_MNEMONIC("rep outs DX,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr16, pIemCpu->iEffSeg, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr32, pIemCpu->iEffSeg, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr64, pIemCpu->iEffSeg, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_64BIT: /* fallthrough - no 64-bit I/O; treated like 32-bit. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr16, pIemCpu->iEffSeg, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr32, pIemCpu->iEffSeg, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr64, pIemCpu->iEffSeg, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("outs DX,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr16, pIemCpu->iEffSeg, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr32, pIemCpu->iEffSeg, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr64, pIemCpu->iEffSeg, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_64BIT: /* fallthrough - no 64-bit I/O; treated like 32-bit. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr16, pIemCpu->iEffSeg, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr32, pIemCpu->iEffSeg, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr64, pIemCpu->iEffSeg, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
8794
8795
/** Opcode 0x70 - JO rel8: jump if OF is set. */
FNIEMOP_DEF(iemOp_jo_Jb)
{
    IEMOP_MNEMONIC("jo Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8813
8814
/** Opcode 0x71 - JNO rel8: jump if OF is clear (inverted test: advance when set). */
FNIEMOP_DEF(iemOp_jno_Jb)
{
    IEMOP_MNEMONIC("jno Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8832
/** Opcode 0x72 - JC/JB/JNAE rel8: jump if CF is set. */
FNIEMOP_DEF(iemOp_jc_Jb)
{
    IEMOP_MNEMONIC("jc/jnae Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8850
8851
/** Opcode 0x73 - JNC/JNB/JAE rel8: jump if CF is clear (inverted test). */
FNIEMOP_DEF(iemOp_jnc_Jb)
{
    IEMOP_MNEMONIC("jnc/jnb Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8869
8870
/** Opcode 0x74 - JE/JZ rel8: jump if ZF is set. */
FNIEMOP_DEF(iemOp_je_Jb)
{
    IEMOP_MNEMONIC("je/jz Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8888
8889
/** Opcode 0x75 - JNE/JNZ rel8: jump if ZF is clear (inverted test). */
FNIEMOP_DEF(iemOp_jne_Jb)
{
    IEMOP_MNEMONIC("jne/jnz Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8907
8908
/** Opcode 0x76 - JBE/JNA rel8: jump if CF or ZF is set. */
FNIEMOP_DEF(iemOp_jbe_Jb)
{
    IEMOP_MNEMONIC("jbe/jna Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8926
8927
/** Opcode 0x77 - JNBE/JA rel8: jump if both CF and ZF are clear (inverted test). */
FNIEMOP_DEF(iemOp_jnbe_Jb)
{
    IEMOP_MNEMONIC("jnbe/ja Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8945
8946
/** Opcode 0x78 - JS rel8: jump if SF is set. */
FNIEMOP_DEF(iemOp_js_Jb)
{
    IEMOP_MNEMONIC("js Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8964
8965
/** Opcode 0x79 - JNS rel8: jump if SF is clear (inverted test). */
FNIEMOP_DEF(iemOp_jns_Jb)
{
    IEMOP_MNEMONIC("jns Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8983
8984
/** Opcode 0x7a - JP/JPE rel8: jump if PF is set. */
FNIEMOP_DEF(iemOp_jp_Jb)
{
    IEMOP_MNEMONIC("jp Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9002
9003
/** Opcode 0x7b - JNP/JPO rel8: jump if PF is clear (inverted test). */
FNIEMOP_DEF(iemOp_jnp_Jb)
{
    IEMOP_MNEMONIC("jnp Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9021
9022
/** Opcode 0x7c - JL/JNGE rel8: jump if SF != OF. */
FNIEMOP_DEF(iemOp_jl_Jb)
{
    IEMOP_MNEMONIC("jl/jnge Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9040
9041
/** Opcode 0x7d - JNL/JGE rel8: jump if SF == OF (inverted test). */
FNIEMOP_DEF(iemOp_jnl_Jb)
{
    IEMOP_MNEMONIC("jnl/jge Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9059
9060
/** Opcode 0x7e - JLE/JNG rel8: jump if ZF is set or SF != OF. */
FNIEMOP_DEF(iemOp_jle_Jb)
{
    IEMOP_MNEMONIC("jle/jng Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9078
9079
/** Opcode 0x7f - JNLE/JG rel8: jump short if greater (signed), i.e. ZF clear and SF == OF. */
FNIEMOP_DEF(iemOp_jnle_Jb)
{
    IEMOP_MNEMONIC("jnle/jg Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm); /* signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* near branches default to 64-bit operand size in long mode */

    IEM_MC_BEGIN(0, 0);
    /* Inverted condition of JLE: the "less or equal" case advances, otherwise jump. */
    IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9097
9098
/**
 * Opcode 0x80 - Group 1: ADD/OR/ADC/SBB/AND/SUB/XOR/CMP Eb,Ib.
 *
 * The reg field of the ModR/M byte selects the operation via the
 * g_apIemImplGrp1 table.  Only CMP (/7) lacks a locked worker, which is
 * what the pfnLockedU8 check below keys off.
 */
FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_80)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* Packed mnemonic table: each entry is 4 bytes, indexed by the reg field. */
    IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Eb,Ib");
    PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEMOP_HLP_NO_LOCK_PREFIX(); /* LOCK is never valid with a register destination */
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        uint32_t fAccess;
        if (pImpl->pfnLockedU8)
            fAccess = IEM_ACCESS_DATA_RW;
        else
        { /* CMP: read-only destination, LOCK prefix is invalid. */
            IEMOP_HLP_NO_LOCK_PREFIX();
            fAccess = IEM_ACCESS_DATA_R;
        }
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        /* Effective address first (3rd arg: one immediate byte still follows the
           ModR/M encoding), then fetch the immediate - opcode byte order matters. */
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);

        IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
9157
9158
9159/** Opcode 0x81. */
9160FNIEMOP_DEF(iemOp_Grp1_Ev_Iz)
9161{
9162 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9163 IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Iz");
9164 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
9165
9166 switch (pIemCpu->enmEffOpSize)
9167 {
9168 case IEMMODE_16BIT:
9169 {
9170 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9171 {
9172 /* register target */
9173 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9174 IEMOP_HLP_NO_LOCK_PREFIX();
9175 IEM_MC_BEGIN(3, 0);
9176 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9177 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u16Imm, 1);
9178 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9179
9180 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9181 IEM_MC_REF_EFLAGS(pEFlags);
9182 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
9183
9184 IEM_MC_ADVANCE_RIP();
9185 IEM_MC_END();
9186 }
9187 else
9188 {
9189 /* memory target */
9190 uint32_t fAccess;
9191 if (pImpl->pfnLockedU16)
9192 fAccess = IEM_ACCESS_DATA_RW;
9193 else
9194 { /* CMP, TEST */
9195 IEMOP_HLP_NO_LOCK_PREFIX();
9196 fAccess = IEM_ACCESS_DATA_R;
9197 }
9198 IEM_MC_BEGIN(3, 2);
9199 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9200 IEM_MC_ARG(uint16_t, u16Src, 1);
9201 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
9202 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9203
9204 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2);
9205 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9206 IEM_MC_ASSIGN(u16Src, u16Imm);
9207 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9208 IEM_MC_FETCH_EFLAGS(EFlags);
9209 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
9210 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
9211 else
9212 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
9213
9214 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
9215 IEM_MC_COMMIT_EFLAGS(EFlags);
9216 IEM_MC_ADVANCE_RIP();
9217 IEM_MC_END();
9218 }
9219 break;
9220 }
9221
9222 case IEMMODE_32BIT:
9223 {
9224 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9225 {
9226 /* register target */
9227 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
9228 IEMOP_HLP_NO_LOCK_PREFIX();
9229 IEM_MC_BEGIN(3, 0);
9230 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9231 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u32Imm, 1);
9232 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9233
9234 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9235 IEM_MC_REF_EFLAGS(pEFlags);
9236 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
9237 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
9238
9239 IEM_MC_ADVANCE_RIP();
9240 IEM_MC_END();
9241 }
9242 else
9243 {
9244 /* memory target */
9245 uint32_t fAccess;
9246 if (pImpl->pfnLockedU32)
9247 fAccess = IEM_ACCESS_DATA_RW;
9248 else
9249 { /* CMP, TEST */
9250 IEMOP_HLP_NO_LOCK_PREFIX();
9251 fAccess = IEM_ACCESS_DATA_R;
9252 }
9253 IEM_MC_BEGIN(3, 2);
9254 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9255 IEM_MC_ARG(uint32_t, u32Src, 1);
9256 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
9257 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9258
9259 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
9260 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
9261 IEM_MC_ASSIGN(u32Src, u32Imm);
9262 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9263 IEM_MC_FETCH_EFLAGS(EFlags);
9264 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
9265 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
9266 else
9267 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
9268
9269 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
9270 IEM_MC_COMMIT_EFLAGS(EFlags);
9271 IEM_MC_ADVANCE_RIP();
9272 IEM_MC_END();
9273 }
9274 break;
9275 }
9276
9277 case IEMMODE_64BIT:
9278 {
9279 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9280 {
9281 /* register target */
9282 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
9283 IEMOP_HLP_NO_LOCK_PREFIX();
9284 IEM_MC_BEGIN(3, 0);
9285 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9286 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u64Imm, 1);
9287 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9288
9289 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9290 IEM_MC_REF_EFLAGS(pEFlags);
9291 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
9292
9293 IEM_MC_ADVANCE_RIP();
9294 IEM_MC_END();
9295 }
9296 else
9297 {
9298 /* memory target */
9299 uint32_t fAccess;
9300 if (pImpl->pfnLockedU64)
9301 fAccess = IEM_ACCESS_DATA_RW;
9302 else
9303 { /* CMP */
9304 IEMOP_HLP_NO_LOCK_PREFIX();
9305 fAccess = IEM_ACCESS_DATA_R;
9306 }
9307 IEM_MC_BEGIN(3, 2);
9308 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9309 IEM_MC_ARG(uint64_t, u64Src, 1);
9310 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
9311 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9312
9313 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
9314 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
9315 IEM_MC_ASSIGN(u64Src, u64Imm);
9316 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9317 IEM_MC_FETCH_EFLAGS(EFlags);
9318 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
9319 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
9320 else
9321 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
9322
9323 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
9324 IEM_MC_COMMIT_EFLAGS(EFlags);
9325 IEM_MC_ADVANCE_RIP();
9326 IEM_MC_END();
9327 }
9328 break;
9329 }
9330 }
9331 return VINF_SUCCESS;
9332}
9333
9334
/**
 * Opcode 0x82 - alias of 0x80 (Group 1 Eb,Ib), valid outside 64-bit mode only.
 */
FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_82)
{
    IEMOP_HLP_NO_64BIT(); /** @todo do we need to decode the whole instruction or is this ok? */
    return FNIEMOP_CALL(iemOp_Grp1_Eb_Ib_80); /* identical behavior to opcode 0x80 */
}
9341
9342
9343/** Opcode 0x83. */
9344FNIEMOP_DEF(iemOp_Grp1_Ev_Ib)
9345{
9346 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9347 IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Ib");
9348 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
9349
9350 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9351 {
9352 /*
9353 * Register target
9354 */
9355 IEMOP_HLP_NO_LOCK_PREFIX();
9356 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9357 switch (pIemCpu->enmEffOpSize)
9358 {
9359 case IEMMODE_16BIT:
9360 {
9361 IEM_MC_BEGIN(3, 0);
9362 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9363 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ (int8_t)u8Imm,1);
9364 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9365
9366 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9367 IEM_MC_REF_EFLAGS(pEFlags);
9368 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
9369
9370 IEM_MC_ADVANCE_RIP();
9371 IEM_MC_END();
9372 break;
9373 }
9374
9375 case IEMMODE_32BIT:
9376 {
9377 IEM_MC_BEGIN(3, 0);
9378 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9379 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ (int8_t)u8Imm,1);
9380 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9381
9382 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9383 IEM_MC_REF_EFLAGS(pEFlags);
9384 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
9385 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
9386
9387 IEM_MC_ADVANCE_RIP();
9388 IEM_MC_END();
9389 break;
9390 }
9391
9392 case IEMMODE_64BIT:
9393 {
9394 IEM_MC_BEGIN(3, 0);
9395 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9396 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ (int8_t)u8Imm,1);
9397 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9398
9399 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9400 IEM_MC_REF_EFLAGS(pEFlags);
9401 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
9402
9403 IEM_MC_ADVANCE_RIP();
9404 IEM_MC_END();
9405 break;
9406 }
9407 }
9408 }
9409 else
9410 {
9411 /*
9412 * Memory target.
9413 */
9414 uint32_t fAccess;
9415 if (pImpl->pfnLockedU16)
9416 fAccess = IEM_ACCESS_DATA_RW;
9417 else
9418 { /* CMP */
9419 IEMOP_HLP_NO_LOCK_PREFIX();
9420 fAccess = IEM_ACCESS_DATA_R;
9421 }
9422
9423 switch (pIemCpu->enmEffOpSize)
9424 {
9425 case IEMMODE_16BIT:
9426 {
9427 IEM_MC_BEGIN(3, 2);
9428 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9429 IEM_MC_ARG(uint16_t, u16Src, 1);
9430 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
9431 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9432
9433 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
9434 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9435 IEM_MC_ASSIGN(u16Src, (int8_t)u8Imm);
9436 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9437 IEM_MC_FETCH_EFLAGS(EFlags);
9438 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
9439 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
9440 else
9441 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
9442
9443 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
9444 IEM_MC_COMMIT_EFLAGS(EFlags);
9445 IEM_MC_ADVANCE_RIP();
9446 IEM_MC_END();
9447 break;
9448 }
9449
9450 case IEMMODE_32BIT:
9451 {
9452 IEM_MC_BEGIN(3, 2);
9453 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9454 IEM_MC_ARG(uint32_t, u32Src, 1);
9455 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
9456 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9457
9458 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
9459 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9460 IEM_MC_ASSIGN(u32Src, (int8_t)u8Imm);
9461 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9462 IEM_MC_FETCH_EFLAGS(EFlags);
9463 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
9464 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
9465 else
9466 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
9467
9468 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
9469 IEM_MC_COMMIT_EFLAGS(EFlags);
9470 IEM_MC_ADVANCE_RIP();
9471 IEM_MC_END();
9472 break;
9473 }
9474
9475 case IEMMODE_64BIT:
9476 {
9477 IEM_MC_BEGIN(3, 2);
9478 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9479 IEM_MC_ARG(uint64_t, u64Src, 1);
9480 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
9481 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9482
9483 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
9484 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9485 IEM_MC_ASSIGN(u64Src, (int8_t)u8Imm);
9486 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9487 IEM_MC_FETCH_EFLAGS(EFlags);
9488 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
9489 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
9490 else
9491 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
9492
9493 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
9494 IEM_MC_COMMIT_EFLAGS(EFlags);
9495 IEM_MC_ADVANCE_RIP();
9496 IEM_MC_END();
9497 break;
9498 }
9499 }
9500 }
9501 return VINF_SUCCESS;
9502}
9503
9504
/** Opcode 0x84 - TEST Eb,Gb: AND without storing the result, flags only. */
FNIEMOP_DEF(iemOp_test_Eb_Gb)
{
    IEMOP_MNEMONIC("test Eb,Gb");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF); /* AF is undefined after TEST */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_test); /* shared rm,r8 binary-op worker */
}
9513
9514
/** Opcode 0x85 - TEST Ev,Gv: AND without storing the result, flags only. */
FNIEMOP_DEF(iemOp_test_Ev_Gv)
{
    IEMOP_MNEMONIC("test Ev,Gv");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF); /* AF is undefined after TEST */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_test); /* shared rm,rv binary-op worker */
}
9523
9524
/**
 * Opcode 0x86 - XCHG Eb,Gb.
 *
 * The memory form is implicitly atomic regardless of a LOCK prefix, hence
 * no lock-prefix check on that path.
 */
FNIEMOP_DEF(iemOp_xchg_Eb_Gb)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_MNEMONIC("xchg Eb,Gb");

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint8_t, uTmp1);
        IEM_MC_LOCAL(uint8_t, uTmp2);

        /* Fetch both, then cross-store them. */
        IEM_MC_FETCH_GREG_U8(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_GREG_U8(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         */
/** @todo the register must be committed separately! */
        IEM_MC_BEGIN(2, 2);
        IEM_MC_ARG(uint8_t *, pu8Mem, 0);
        IEM_MC_ARG(uint8_t *, pu8Reg, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); /* no immediate bytes follow */
        IEM_MC_MEM_MAP(pu8Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u8, pu8Mem, pu8Reg);
        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Mem, IEM_ACCESS_DATA_RW);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
9572
9573
/**
 * Opcode 0x87 - XCHG Ev,Gv.
 *
 * The memory form is implicitly atomic regardless of a LOCK prefix, hence
 * no lock-prefix check on that path.
 */
FNIEMOP_DEF(iemOp_xchg_Ev_Gv)
{
    IEMOP_MNEMONIC("xchg Ev,Gv");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, uTmp1);
                IEM_MC_LOCAL(uint16_t, uTmp2);

                /* Fetch both, then cross-store them. */
                IEM_MC_FETCH_GREG_U16(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U16(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, uTmp1);
                IEM_MC_LOCAL(uint32_t, uTmp2);

                IEM_MC_FETCH_GREG_U32(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U32(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, uTmp1);
                IEM_MC_LOCAL(uint64_t, uTmp2);

                IEM_MC_FETCH_GREG_U64(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U64(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're accessing memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
/** @todo the register must be committed separately! */
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint16_t *, pu16Mem, 0);
                IEM_MC_ARG(uint16_t *, pu16Reg, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); /* no immediate bytes follow */
                IEM_MC_MEM_MAP(pu16Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u16, pu16Mem, pu16Reg);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Mem, IEM_ACCESS_DATA_RW);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint32_t *, pu32Mem, 0);
                IEM_MC_ARG(uint32_t *, pu32Reg, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu32Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u32, pu32Mem, pu32Reg);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Mem, IEM_ACCESS_DATA_RW);

                /* 32-bit register writes zero the upper half in 64-bit mode. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Reg);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint64_t *, pu64Mem, 0);
                IEM_MC_ARG(uint64_t *, pu64Reg, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu64Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u64, pu64Mem, pu64Reg);
                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Mem, IEM_ACCESS_DATA_RW);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
9695
9696
/** Opcode 0x88 - MOV Eb,Gb: store a byte register to r/m8. */
FNIEMOP_DEF(iemOp_mov_Eb_Gb)
{
    IEMOP_MNEMONIC("mov Eb,Gb");

    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register -> register */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're writing a register to memory.
         */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); /* no immediate bytes follow */
        IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;

}
9735
9736
/** Opcode 0x89 - MOV Ev,Gv: store a word/dword/qword register to r/m. */
FNIEMOP_DEF(iemOp_mov_Ev_Gv)
{
    IEMOP_MNEMONIC("mov Ev,Gv");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register -> register, one case per effective operand size */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're writing a register to memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); /* no immediate bytes follow */
                IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
9823
9824
/** Opcode 0x8a - MOV Gb,Eb: load a byte register from r/m8. */
FNIEMOP_DEF(iemOp_mov_Gb_Eb)
{
    IEMOP_MNEMONIC("mov Gb,Eb");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register -> register */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); /* no immediate bytes follow */
        IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
9861
9862
/** Opcode 0x8b - MOV Gv,Ev: load a word/dword/qword register from r/m. */
FNIEMOP_DEF(iemOp_mov_Gv_Ev)
{
    IEMOP_MNEMONIC("mov Gv,Ev");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register -> register, one case per effective operand size */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); /* no immediate bytes follow */
                IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
9949
9950
/**
 * Opcode 0x63 - mode dependent:
 *   - outside 64-bit mode: ARPL Ew,Gw;
 *   - 64-bit mode, 64-bit operand size: MOVSXD Gv,Ev;
 *   - 64-bit mode, other operand sizes: behaves like MOV Gv,Ev.
 * (Defined here, after 0x8b, because the MOV worker must exist first.)
 */
FNIEMOP_DEF(iemOp_arpl_Ew_Gw_movsx_Gv_Ev)
{
    if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
        return FNIEMOP_CALL(iemOp_arpl_Ew_Gw);
    if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        return FNIEMOP_CALL(iemOp_mov_Gv_Ev);
    return FNIEMOP_CALL(iemOp_movsxd_Gv_Ev);
}
9960
9961
/** Opcode 0x8c - MOV Ev,Sw: store a segment register selector to r/m. */
FNIEMOP_DEF(iemOp_mov_Ev_Sw)
{
    IEMOP_MNEMONIC("mov Ev,Sw");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * Check that the destination register exists. The REX.R prefix is ignored.
     */
    uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
    if (   iSegReg > X86_SREG_GS) /* reg values 6 and 7 don't name a segment register */
        return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     * In that case, the operand size is respected and the upper bits are
     * cleared (starting with some pentium).
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_SREG_ZX_U32(u32Value, iSegReg); /* zero-extended selector */
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_SREG_ZX_U64(u64Value, iSegReg); /* zero-extended selector */
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're saving the register to memory. The access is word sized
         * regardless of operand size prefixes.
         */
#if 0 /* not necessary */
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
#endif
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); /* no immediate bytes follow */
        IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
10034
10035
10036
10037
/**
 * Opcode 0x8d - LEA Gv,M: store the effective address (no memory access).
 * The register form (mod == 3) is invalid.
 */
FNIEMOP_DEF(iemOp_lea_Gv_M)
{
    IEMOP_MNEMONIC("lea Gv,M");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_OPCODE(); /* no register form */

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_LOCAL(uint16_t, u16Cast);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); /* address only, never dereferenced */
            IEM_MC_ASSIGN_TO_SMALLER(u16Cast, GCPtrEffSrc); /* truncate to operand size */
            IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Cast);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_LOCAL(uint32_t, u32Cast);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
            IEM_MC_ASSIGN_TO_SMALLER(u32Cast, GCPtrEffSrc); /* truncate to operand size */
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Cast);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, GCPtrEffSrc);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
    }
    AssertFailedReturn(VERR_INTERNAL_ERROR_5); /* enmEffOpSize is one of the three modes above */
}
10082
10083
/** Opcode 0x8e. */
FNIEMOP_DEF(iemOp_mov_Sw_Ev)
{
    /* Loads a segment register from a 16-bit register or memory operand.
       The actual descriptor loading/validation is done by the
       iemCImpl_load_SReg C implementation. */
    IEMOP_MNEMONIC("mov Sw,Ev");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * The practical operand size is 16-bit.
     */
#if 0 /* not necessary */
    pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
#endif

    /*
     * Check that the destination register exists and can be used with this
     * instruction.  The REX.R prefix is ignored.
     */
    uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
    /* Loading CS this way is invalid, as are reg values beyond GS. */
    if (   iSegReg == X86_SREG_CS
        || iSegReg > X86_SREG_GS)
        return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(2, 0);
        IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
        IEM_MC_ARG(uint16_t,      u16Value,          1);
        IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
        IEM_MC_END();
    }
    else
    {
        /*
         * We're loading the register from memory.  The access is word sized
         * regardless of operand size prefixes.
         */
        IEM_MC_BEGIN(2, 1);
        IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
        IEM_MC_ARG(uint16_t,      u16Value,          1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
10137
10138
/** Opcode 0x8f /0. */
FNIEMOP_DEF_1(iemOp_pop_Ev, uint8_t, bRm)
{
    /* This bugger is rather annoying as it requires rSP to be updated before
       doing the effective address calculations. Will eventually require a
       split between the R/M+SIB decoding and the effective address
       calculation - which is something that is required for any attempt at
       reusing this code for a recompiler. It may also be good to have if we
       need to delay #UD exception caused by invalid lock prefixes.

       For now, we'll do a mostly safe interpreter-only implementation here. */
    /** @todo What's the deal with the 'reg' field and pop Ev? Ignoring it for
     *        now until tests show it's checked.. */
    IEMOP_MNEMONIC("pop Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /* Register access is relatively easy and can share code. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return FNIEMOP_CALL_1(iemOpCommonPopGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);

    /*
     * Memory target.
     *
     * Intel says that RSP is incremented before it's used in any effective
     * address calcuations. This means some serious extra annoyance here since
     * we decode and calculate the effective address in one step and like to
     * delay committing registers till everything is done.
     *
     * So, we'll decode and calculate the effective address twice. This will
     * require some recoding if turned into a recompiler.
     */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* The common code does this differently. */

#ifndef TST_IEM_CHECK_MC
    /* Calc effective address with modified ESP. */
    /* First pass: consume the ModR/M/SIB/displacement bytes, then rewind the
       opcode pointer so the second pass re-reads the same bytes. */
    uint8_t const   offOpcodeSaved = pIemCpu->offOpcode;
    RTGCPTR         GCPtrEff;
    VBOXSTRICTRC    rcStrict;
    rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, 0, &GCPtrEff);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pIemCpu->offOpcode = offOpcodeSaved;

    /* Second pass: temporarily advance rSP by the operand size so the EA is
       calculated with the incremented stack pointer, then restore rSP. */
    PCPUMCTX        pCtx = pIemCpu->CTX_SUFF(pCtx);
    uint64_t const  RspSaved = pCtx->rsp;
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT: iemRegAddToRsp(pIemCpu, pCtx, 2); break;
        case IEMMODE_32BIT: iemRegAddToRsp(pIemCpu, pCtx, 4); break;
        case IEMMODE_64BIT: iemRegAddToRsp(pIemCpu, pCtx, 8); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, 0, &GCPtrEff);
    Assert(rcStrict == VINF_SUCCESS);
    pCtx->rsp = RspSaved;

    /* Perform the operation - this should be CImpl. */
    /* Pop into a temporary, then store it at the calculated address; rSP is
       only committed when the memory store succeeded. */
    RTUINT64U TmpRsp;
    TmpRsp.u = pCtx->rsp;
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Value;
            rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
            if (rcStrict == VINF_SUCCESS)
                rcStrict = iemMemStoreDataU16(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u16Value);
            break;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Value;
            rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
            if (rcStrict == VINF_SUCCESS)
                rcStrict = iemMemStoreDataU32(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u32Value);
            break;
        }

        case IEMMODE_64BIT:
        {
            uint64_t u64Value;
            rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
            if (rcStrict == VINF_SUCCESS)
                rcStrict = iemMemStoreDataU64(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u64Value);
            break;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    if (rcStrict == VINF_SUCCESS)
    {
        pCtx->rsp = TmpRsp.u;
        iemRegUpdateRipAndClearRF(pIemCpu);
    }
    return rcStrict;

#else
    return VERR_IEM_IPE_2;
#endif
}
10240
10241
10242/** Opcode 0x8f. */
10243FNIEMOP_DEF(iemOp_Grp1A)
10244{
10245 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10246 if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only pop Ev in this group. */
10247 return IEMOP_RAISE_INVALID_OPCODE();
10248 return FNIEMOP_CALL_1(iemOp_pop_Ev, bRm);
10249}
10250
10251
10252/**
10253 * Common 'xchg reg,rAX' helper.
10254 */
10255FNIEMOP_DEF_1(iemOpCommonXchgGRegRax, uint8_t, iReg)
10256{
10257 IEMOP_HLP_NO_LOCK_PREFIX();
10258
10259 iReg |= pIemCpu->uRexB;
10260 switch (pIemCpu->enmEffOpSize)
10261 {
10262 case IEMMODE_16BIT:
10263 IEM_MC_BEGIN(0, 2);
10264 IEM_MC_LOCAL(uint16_t, u16Tmp1);
10265 IEM_MC_LOCAL(uint16_t, u16Tmp2);
10266 IEM_MC_FETCH_GREG_U16(u16Tmp1, iReg);
10267 IEM_MC_FETCH_GREG_U16(u16Tmp2, X86_GREG_xAX);
10268 IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp1);
10269 IEM_MC_STORE_GREG_U16(iReg, u16Tmp2);
10270 IEM_MC_ADVANCE_RIP();
10271 IEM_MC_END();
10272 return VINF_SUCCESS;
10273
10274 case IEMMODE_32BIT:
10275 IEM_MC_BEGIN(0, 2);
10276 IEM_MC_LOCAL(uint32_t, u32Tmp1);
10277 IEM_MC_LOCAL(uint32_t, u32Tmp2);
10278 IEM_MC_FETCH_GREG_U32(u32Tmp1, iReg);
10279 IEM_MC_FETCH_GREG_U32(u32Tmp2, X86_GREG_xAX);
10280 IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp1);
10281 IEM_MC_STORE_GREG_U32(iReg, u32Tmp2);
10282 IEM_MC_ADVANCE_RIP();
10283 IEM_MC_END();
10284 return VINF_SUCCESS;
10285
10286 case IEMMODE_64BIT:
10287 IEM_MC_BEGIN(0, 2);
10288 IEM_MC_LOCAL(uint64_t, u64Tmp1);
10289 IEM_MC_LOCAL(uint64_t, u64Tmp2);
10290 IEM_MC_FETCH_GREG_U64(u64Tmp1, iReg);
10291 IEM_MC_FETCH_GREG_U64(u64Tmp2, X86_GREG_xAX);
10292 IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp1);
10293 IEM_MC_STORE_GREG_U64(iReg, u64Tmp2);
10294 IEM_MC_ADVANCE_RIP();
10295 IEM_MC_END();
10296 return VINF_SUCCESS;
10297
10298 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10299 }
10300}
10301
10302
10303/** Opcode 0x90. */
10304FNIEMOP_DEF(iemOp_nop)
10305{
10306 /* R8/R8D and RAX/EAX can be exchanged. */
10307 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_B)
10308 {
10309 IEMOP_MNEMONIC("xchg r8,rAX");
10310 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xAX);
10311 }
10312
10313 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
10314 IEMOP_MNEMONIC("pause");
10315 else
10316 IEMOP_MNEMONIC("nop");
10317 IEM_MC_BEGIN(0, 0);
10318 IEM_MC_ADVANCE_RIP();
10319 IEM_MC_END();
10320 return VINF_SUCCESS;
10321}
10322
10323
/** Opcode 0x91. */
FNIEMOP_DEF(iemOp_xchg_eCX_eAX)
{
    IEMOP_MNEMONIC("xchg rCX,rAX");
    /* The common helper ORs in REX.B, so this may address r9 as well. */
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xCX);
}
10330
10331
/** Opcode 0x92. */
FNIEMOP_DEF(iemOp_xchg_eDX_eAX)
{
    IEMOP_MNEMONIC("xchg rDX,rAX");
    /* The common helper ORs in REX.B, so this may address r10 as well. */
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDX);
}
10338
10339
/** Opcode 0x93. */
FNIEMOP_DEF(iemOp_xchg_eBX_eAX)
{
    IEMOP_MNEMONIC("xchg rBX,rAX");
    /* The common helper ORs in REX.B, so this may address r11 as well. */
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBX);
}
10346
10347
10348/** Opcode 0x94. */
10349FNIEMOP_DEF(iemOp_xchg_eSP_eAX)
10350{
10351 IEMOP_MNEMONIC("xchg rSX,rAX");
10352 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSP);
10353}
10354
10355
/** Opcode 0x95. */
FNIEMOP_DEF(iemOp_xchg_eBP_eAX)
{
    IEMOP_MNEMONIC("xchg rBP,rAX");
    /* The common helper ORs in REX.B, so this may address r13 as well. */
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBP);
}
10362
10363
/** Opcode 0x96. */
FNIEMOP_DEF(iemOp_xchg_eSI_eAX)
{
    IEMOP_MNEMONIC("xchg rSI,rAX");
    /* The common helper ORs in REX.B, so this may address r14 as well. */
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSI);
}
10370
10371
/** Opcode 0x97. */
FNIEMOP_DEF(iemOp_xchg_eDI_eAX)
{
    IEMOP_MNEMONIC("xchg rDI,rAX");
    /* The common helper ORs in REX.B, so this may address r15 as well. */
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDI);
}
10378
10379
/** Opcode 0x98. */
FNIEMOP_DEF(iemOp_cbw)
{
    /* CBW/CWDE/CDQE: sign extend the lower half of rAX into the upper half.
       Implemented by testing the sign bit and then ORing in or ANDing out
       the upper bits. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEMOP_MNEMONIC("cbw");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 7) {
                IEM_MC_OR_GREG_U16(X86_GREG_xAX, UINT16_C(0xff00));
            } IEM_MC_ELSE() {
                IEM_MC_AND_GREG_U16(X86_GREG_xAX, UINT16_C(0x00ff));
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEMOP_MNEMONIC("cwde");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 15) {
                IEM_MC_OR_GREG_U32(X86_GREG_xAX, UINT32_C(0xffff0000));
            } IEM_MC_ELSE() {
                IEM_MC_AND_GREG_U32(X86_GREG_xAX, UINT32_C(0x0000ffff));
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEMOP_MNEMONIC("cdqe");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 31) {
                IEM_MC_OR_GREG_U64(X86_GREG_xAX, UINT64_C(0xffffffff00000000));
            } IEM_MC_ELSE() {
                IEM_MC_AND_GREG_U64(X86_GREG_xAX, UINT64_C(0x00000000ffffffff));
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10425
10426
/** Opcode 0x99. */
FNIEMOP_DEF(iemOp_cwd)
{
    /* CWD/CDQ/CQO: fill rDX with the sign bit of rAX (all ones or zero)
       at the current effective operand size. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEMOP_MNEMONIC("cwd");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 15) {
                IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xDX, UINT16_C(0xffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEMOP_MNEMONIC("cdq");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 31) {
                IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xDX, UINT32_C(0xffffffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEMOP_MNEMONIC("cqo");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 63) {
                IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xDX, UINT64_C(0xffffffffffffffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10472
10473
/** Opcode 0x9a. */
FNIEMOP_DEF(iemOp_call_Ap)
{
    /* Direct far call with an immediate seg:offset pointer; invalid in
       64-bit mode (IEMOP_HLP_NO_64BIT). */
    IEMOP_MNEMONIC("call Ap");
    IEMOP_HLP_NO_64BIT();

    /* Decode the far pointer address and pass it on to the far call C implementation. */
    /* Offset first (16 or 32 bits depending on operand size), then the
       16-bit selector. */
    uint32_t offSeg;
    if (pIemCpu->enmEffOpSize != IEMMODE_16BIT)
        IEM_OPCODE_GET_NEXT_U32(&offSeg);
    else
        IEM_OPCODE_GET_NEXT_U16_ZX_U32(&offSeg);
    uint16_t uSel;  IEM_OPCODE_GET_NEXT_U16(&uSel);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_callf, uSel, offSeg, pIemCpu->enmEffOpSize);
}
10490
10491
/** Opcode 0x9b. (aka fwait) */
FNIEMOP_DEF(iemOp_wait)
{
    /* Only checks for pending FPU exceptions / device-not-available;
       otherwise behaves like a NOP. */
    IEMOP_MNEMONIC("wait");
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10505
10506
/** Opcode 0x9c. */
FNIEMOP_DEF(iemOp_pushf_Fv)
{
    /* Defers entirely to the C implementation; default 64-bit operand size
       applies in long mode. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_pushf, pIemCpu->enmEffOpSize);
}
10514
10515
/** Opcode 0x9d. */
FNIEMOP_DEF(iemOp_popf_Fv)
{
    /* Defers entirely to the C implementation; default 64-bit operand size
       applies in long mode. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_popf, pIemCpu->enmEffOpSize);
}
10523
10524
/** Opcode 0x9e. */
FNIEMOP_DEF(iemOp_sahf)
{
    /* Loads SF, ZF, AF, PF and CF from AH into EFLAGS.  In 64-bit mode this
       is only valid when the CPU reports the LAHF/SAHF CPUID feature. */
    IEMOP_MNEMONIC("sahf");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
        && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
        return IEMOP_RAISE_INVALID_OPCODE();
    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(uint32_t, u32Flags);
    IEM_MC_LOCAL(uint32_t, EFlags);
    IEM_MC_FETCH_EFLAGS(EFlags);
    IEM_MC_FETCH_GREG_U8_ZX_U32(u32Flags, X86_GREG_xSP/*=AH*/);
    /* Keep only the arithmetic flags from AH, force the reserved bit 1,
       and merge with the preserved upper EFLAGS bits. */
    IEM_MC_AND_LOCAL_U32(u32Flags, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
    IEM_MC_AND_LOCAL_U32(EFlags, UINT32_C(0xffffff00));
    IEM_MC_OR_LOCAL_U32(u32Flags, X86_EFL_1);
    IEM_MC_OR_2LOCS_U32(EFlags, u32Flags);
    IEM_MC_COMMIT_EFLAGS(EFlags);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10547
10548
/** Opcode 0x9f. */
FNIEMOP_DEF(iemOp_lahf)
{
    /* Stores the low byte of EFLAGS into AH.  In 64-bit mode this is only
       valid when the CPU reports the LAHF/SAHF CPUID feature. */
    IEMOP_MNEMONIC("lahf");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
        && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
        return IEMOP_RAISE_INVALID_OPCODE();
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint8_t, u8Flags);
    IEM_MC_FETCH_EFLAGS_U8(u8Flags);
    IEM_MC_STORE_GREG_U8(X86_GREG_xSP/*=AH*/, u8Flags);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10565
10566
10567/**
10568 * Macro used by iemOp_mov_Al_Ob, iemOp_mov_rAX_Ov, iemOp_mov_Ob_AL and
10569 * iemOp_mov_Ov_rAX to fetch the moffsXX bit of the opcode and fend of lock
10570 * prefixes. Will return on failures.
10571 * @param a_GCPtrMemOff The variable to store the offset in.
10572 */
10573#define IEMOP_FETCH_MOFFS_XX(a_GCPtrMemOff) \
10574 do \
10575 { \
10576 switch (pIemCpu->enmEffAddrMode) \
10577 { \
10578 case IEMMODE_16BIT: \
10579 IEM_OPCODE_GET_NEXT_U16_ZX_U64(&(a_GCPtrMemOff)); \
10580 break; \
10581 case IEMMODE_32BIT: \
10582 IEM_OPCODE_GET_NEXT_U32_ZX_U64(&(a_GCPtrMemOff)); \
10583 break; \
10584 case IEMMODE_64BIT: \
10585 IEM_OPCODE_GET_NEXT_U64(&(a_GCPtrMemOff)); \
10586 break; \
10587 IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
10588 } \
10589 IEMOP_HLP_NO_LOCK_PREFIX(); \
10590 } while (0)
10591
10592/** Opcode 0xa0. */
10593FNIEMOP_DEF(iemOp_mov_Al_Ob)
10594{
10595 /*
10596 * Get the offset and fend of lock prefixes.
10597 */
10598 RTGCPTR GCPtrMemOff;
10599 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
10600
10601 /*
10602 * Fetch AL.
10603 */
10604 IEM_MC_BEGIN(0,1);
10605 IEM_MC_LOCAL(uint8_t, u8Tmp);
10606 IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
10607 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
10608 IEM_MC_ADVANCE_RIP();
10609 IEM_MC_END();
10610 return VINF_SUCCESS;
10611}
10612
10613
/** Opcode 0xa1. */
FNIEMOP_DEF(iemOp_mov_rAX_Ov)
{
    /*
     * Get the offset and fend off lock prefixes.
     */
    IEMOP_MNEMONIC("mov rAX,Ov");
    RTGCPTR GCPtrMemOff;
    IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);

    /*
     * Fetch rAX at the current effective operand size from the moffs address
     * in the effective segment.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint16_t, u16Tmp);
            IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint32_t, u32Tmp);
            IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint64_t, u64Tmp);
            IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10659
10660
10661/** Opcode 0xa2. */
10662FNIEMOP_DEF(iemOp_mov_Ob_AL)
10663{
10664 /*
10665 * Get the offset and fend of lock prefixes.
10666 */
10667 RTGCPTR GCPtrMemOff;
10668 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
10669
10670 /*
10671 * Store AL.
10672 */
10673 IEM_MC_BEGIN(0,1);
10674 IEM_MC_LOCAL(uint8_t, u8Tmp);
10675 IEM_MC_FETCH_GREG_U8(u8Tmp, X86_GREG_xAX);
10676 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrMemOff, u8Tmp);
10677 IEM_MC_ADVANCE_RIP();
10678 IEM_MC_END();
10679 return VINF_SUCCESS;
10680}
10681
10682
10683/** Opcode 0xa3. */
10684FNIEMOP_DEF(iemOp_mov_Ov_rAX)
10685{
10686 /*
10687 * Get the offset and fend of lock prefixes.
10688 */
10689 RTGCPTR GCPtrMemOff;
10690 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
10691
10692 /*
10693 * Store rAX.
10694 */
10695 switch (pIemCpu->enmEffOpSize)
10696 {
10697 case IEMMODE_16BIT:
10698 IEM_MC_BEGIN(0,1);
10699 IEM_MC_LOCAL(uint16_t, u16Tmp);
10700 IEM_MC_FETCH_GREG_U16(u16Tmp, X86_GREG_xAX);
10701 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrMemOff, u16Tmp);
10702 IEM_MC_ADVANCE_RIP();
10703 IEM_MC_END();
10704 return VINF_SUCCESS;
10705
10706 case IEMMODE_32BIT:
10707 IEM_MC_BEGIN(0,1);
10708 IEM_MC_LOCAL(uint32_t, u32Tmp);
10709 IEM_MC_FETCH_GREG_U32(u32Tmp, X86_GREG_xAX);
10710 IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrMemOff, u32Tmp);
10711 IEM_MC_ADVANCE_RIP();
10712 IEM_MC_END();
10713 return VINF_SUCCESS;
10714
10715 case IEMMODE_64BIT:
10716 IEM_MC_BEGIN(0,1);
10717 IEM_MC_LOCAL(uint64_t, u64Tmp);
10718 IEM_MC_FETCH_GREG_U64(u64Tmp, X86_GREG_xAX);
10719 IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrMemOff, u64Tmp);
10720 IEM_MC_ADVANCE_RIP();
10721 IEM_MC_END();
10722 return VINF_SUCCESS;
10723
10724 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10725 }
10726}
10727
/** Macro used by iemOp_movsb_Xb_Yb and iemOp_movswd_Xv_Yv.
 * Loads ValBits bits from [seg:xSI], stores them at [ES:xDI], then advances
 * or retreats both index registers by the element size according to EFLAGS.DF.
 * The destination segment is always ES and cannot be overridden. */
#define IEM_MOVS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(0, 2); \
        IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
        IEM_MC_LOCAL(RTGCPTR,           uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
        IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END();
10746
/** Opcode 0xa4. */
FNIEMOP_DEF(iemOp_movsb_Xb_Yb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    /* F2 and F3 behave identically for MOVS (both mean "rep"). */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep movsb Xb,Yb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("movsb Xb,Yb");

    /*
     * Sharing case implementation with movs[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_MOVS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_MOVS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_MOVS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
10780
10781
/** Opcode 0xa5. */
FNIEMOP_DEF(iemOp_movswd_Xv_Yv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    /* F2 and F3 behave identically for MOVS (both mean "rep"). */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep movs Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                /* NOTE(review): no break after this inner switch, but every
                   inner case returns, so control cannot fall into the 64-bit
                   case below. */
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    /* 64-bit operand size with 16-bit addressing cannot be encoded. */
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("movs Xv,Yv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with movsb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_MOVS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_MOVS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_MOVS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_MOVS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_MOVS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_MOVS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_MOVS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_MOVS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
10864
10865#undef IEM_MOVS_CASE
10866
/** Macro used by iemOp_cmpsb_Xb_Yb and iemOp_cmpswd_Xv_Yv.
 * Compares ValBits bits at [seg:xSI] with [ES:xDI] via the cmp assembly
 * worker (sets EFLAGS only; no memory is written), then advances or retreats
 * both index registers by the element size according to EFLAGS.DF. */
#define IEM_CMPS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(3, 3); \
        IEM_MC_ARG(uint##ValBits##_t *, puValue1, 0); \
        IEM_MC_ARG(uint##ValBits##_t,   uValue2,  1); \
        IEM_MC_ARG(uint32_t *,          pEFlags,  2); \
        IEM_MC_LOCAL(uint##ValBits##_t, uValue1); \
        IEM_MC_LOCAL(RTGCPTR,           uAddr); \
        \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue1, pIemCpu->iEffSeg, uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue2, X86_SREG_ES, uAddr); \
        IEM_MC_REF_LOCAL(puValue1, uValue1); \
        IEM_MC_REF_EFLAGS(pEFlags); \
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puValue1, uValue2, pEFlags); \
        \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END(); \
10894/** Opcode 0xa6. */
10895FNIEMOP_DEF(iemOp_cmpsb_Xb_Yb)
10896{
10897 IEMOP_HLP_NO_LOCK_PREFIX();
10898
10899 /*
10900 * Use the C implementation if a repeat prefix is encountered.
10901 */
10902 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
10903 {
10904 IEMOP_MNEMONIC("repe cmps Xb,Yb");
10905 switch (pIemCpu->enmEffAddrMode)
10906 {
10907 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr16, pIemCpu->iEffSeg);
10908 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr32, pIemCpu->iEffSeg);
10909 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr64, pIemCpu->iEffSeg);
10910 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10911 }
10912 }
10913 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
10914 {
10915 IEMOP_MNEMONIC("repe cmps Xb,Yb");
10916 switch (pIemCpu->enmEffAddrMode)
10917 {
10918 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr16, pIemCpu->iEffSeg);
10919 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr32, pIemCpu->iEffSeg);
10920 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr64, pIemCpu->iEffSeg);
10921 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10922 }
10923 }
10924 IEMOP_MNEMONIC("cmps Xb,Yb");
10925
10926 /*
10927 * Sharing case implementation with cmps[wdq] below.
10928 */
10929 switch (pIemCpu->enmEffAddrMode)
10930 {
10931 case IEMMODE_16BIT: IEM_CMPS_CASE(8, 16); break;
10932 case IEMMODE_32BIT: IEM_CMPS_CASE(8, 32); break;
10933 case IEMMODE_64BIT: IEM_CMPS_CASE(8, 64); break;
10934 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10935 }
10936 return VINF_SUCCESS;
10937
10938}
10939
10940
/** Opcode 0xa7. */
FNIEMOP_DEF(iemOp_cmpswd_Xv_Yv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
    {
        IEMOP_MNEMONIC("repe cmps Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                /* NOTE(review): no break after this inner switch, but every
                   inner case returns, so control cannot fall through. */
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    /* 64-bit operand size with 16-bit addressing cannot be encoded. */
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }

    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
    {
        IEMOP_MNEMONIC("repne cmps Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                /* NOTE(review): same as above - all inner cases return. */
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    /* 64-bit operand size with 16-bit addressing cannot be encoded. */
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }

    IEMOP_MNEMONIC("cmps Xv,Yv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with cmpsb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_CMPS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_CMPS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;

}
11059
11060#undef IEM_CMPS_CASE
11061
/** Opcode 0xa8. */
FNIEMOP_DEF(iemOp_test_AL_Ib)
{
    IEMOP_MNEMONIC("test al,Ib");
    /* TEST leaves AF undefined, so exclude it when verifying flag state. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    /* Reuse the common AL,Ib binary-operator decoder with the TEST worker table. */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_test);
}
11069
11070
/** Opcode 0xa9. */
FNIEMOP_DEF(iemOp_test_eAX_Iz)
{
    IEMOP_MNEMONIC("test rAX,Iz");
    /* TEST leaves AF undefined, so exclude it when verifying flag state. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    /* Reuse the common rAX,Iz binary-operator decoder with the TEST worker table. */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_test);
}
11078
11079
/** Macro used by iemOp_stosb_Yb_AL and iemOp_stoswd_Yv_eAX.
 *
 * Emits the IEM_MC block for one non-repeated STOS variant: stores the
 * ValBits-wide [re]AX value to ES:[re]DI and then advances or retreats
 * rDI by ValBits/8 bytes according to EFLAGS.DF.
 *
 * Note: The trailing line continuation after IEM_MC_END() has been dropped;
 *       it made the following (blank) line part of the macro and was
 *       inconsistent with IEM_LODS_CASE / IEM_SCAS_CASE.
 */
#define IEM_STOS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(0, 2); \
        IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
        IEM_MC_LOCAL(RTGCPTR, uAddr); \
        IEM_MC_FETCH_GREG_U##ValBits(uValue, X86_GREG_xAX); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
        IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END();

/** Opcode 0xaa. */
FNIEMOP_DEF(iemOp_stosb_Yb_AL)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     * (REPZ and REPNZ are dispatched to the same rep-stos workers.)
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep stos Yb,al");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("stos Yb,al");

    /*
     * Sharing case implementation with stos[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_STOS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_STOS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_STOS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
11129
11130
11131/** Opcode 0xab. */
11132FNIEMOP_DEF(iemOp_stoswd_Yv_eAX)
11133{
11134 IEMOP_HLP_NO_LOCK_PREFIX();
11135
11136 /*
11137 * Use the C implementation if a repeat prefix is encountered.
11138 */
11139 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
11140 {
11141 IEMOP_MNEMONIC("rep stos Yv,rAX");
11142 switch (pIemCpu->enmEffOpSize)
11143 {
11144 case IEMMODE_16BIT:
11145 switch (pIemCpu->enmEffAddrMode)
11146 {
11147 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m16);
11148 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m32);
11149 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m64);
11150 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11151 }
11152 break;
11153 case IEMMODE_32BIT:
11154 switch (pIemCpu->enmEffAddrMode)
11155 {
11156 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m16);
11157 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m32);
11158 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m64);
11159 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11160 }
11161 case IEMMODE_64BIT:
11162 switch (pIemCpu->enmEffAddrMode)
11163 {
11164 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
11165 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m32);
11166 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m64);
11167 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11168 }
11169 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11170 }
11171 }
11172 IEMOP_MNEMONIC("stos Yv,rAX");
11173
11174 /*
11175 * Annoying double switch here.
11176 * Using ugly macro for implementing the cases, sharing it with stosb.
11177 */
11178 switch (pIemCpu->enmEffOpSize)
11179 {
11180 case IEMMODE_16BIT:
11181 switch (pIemCpu->enmEffAddrMode)
11182 {
11183 case IEMMODE_16BIT: IEM_STOS_CASE(16, 16); break;
11184 case IEMMODE_32BIT: IEM_STOS_CASE(16, 32); break;
11185 case IEMMODE_64BIT: IEM_STOS_CASE(16, 64); break;
11186 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11187 }
11188 break;
11189
11190 case IEMMODE_32BIT:
11191 switch (pIemCpu->enmEffAddrMode)
11192 {
11193 case IEMMODE_16BIT: IEM_STOS_CASE(32, 16); break;
11194 case IEMMODE_32BIT: IEM_STOS_CASE(32, 32); break;
11195 case IEMMODE_64BIT: IEM_STOS_CASE(32, 64); break;
11196 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11197 }
11198 break;
11199
11200 case IEMMODE_64BIT:
11201 switch (pIemCpu->enmEffAddrMode)
11202 {
11203 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
11204 case IEMMODE_32BIT: IEM_STOS_CASE(64, 32); break;
11205 case IEMMODE_64BIT: IEM_STOS_CASE(64, 64); break;
11206 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11207 }
11208 break;
11209 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11210 }
11211 return VINF_SUCCESS;
11212}
11213
11214#undef IEM_STOS_CASE
11215
/** Macro used by iemOp_lodsb_AL_Xb and iemOp_lodswd_eAX_Xv.
 *
 * Emits the IEM_MC block for one non-repeated LODS variant: loads a
 * ValBits-wide value from DS(or seg-override):[re]SI into [re]AX and then
 * advances or retreats rSI by ValBits/8 bytes according to EFLAGS.DF. */
#define IEM_LODS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(0, 2); \
        IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
        IEM_MC_LOCAL(RTGCPTR, uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
        IEM_MC_STORE_GREG_U##ValBits(X86_GREG_xAX, uValue); \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END();
11231
/** Opcode 0xac. */
FNIEMOP_DEF(iemOp_lodsb_AL_Xb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     * (REPZ and REPNZ are dispatched to the same rep-lods workers.)
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep lodsb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("lodsb al,Xb");

    /*
     * Sharing case implementation with lods[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_LODS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_LODS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_LODS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
11265
11266
11267/** Opcode 0xad. */
11268FNIEMOP_DEF(iemOp_lodswd_eAX_Xv)
11269{
11270 IEMOP_HLP_NO_LOCK_PREFIX();
11271
11272 /*
11273 * Use the C implementation if a repeat prefix is encountered.
11274 */
11275 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
11276 {
11277 IEMOP_MNEMONIC("rep lods rAX,Xv");
11278 switch (pIemCpu->enmEffOpSize)
11279 {
11280 case IEMMODE_16BIT:
11281 switch (pIemCpu->enmEffAddrMode)
11282 {
11283 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m16, pIemCpu->iEffSeg);
11284 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m32, pIemCpu->iEffSeg);
11285 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m64, pIemCpu->iEffSeg);
11286 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11287 }
11288 break;
11289 case IEMMODE_32BIT:
11290 switch (pIemCpu->enmEffAddrMode)
11291 {
11292 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m16, pIemCpu->iEffSeg);
11293 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m32, pIemCpu->iEffSeg);
11294 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m64, pIemCpu->iEffSeg);
11295 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11296 }
11297 case IEMMODE_64BIT:
11298 switch (pIemCpu->enmEffAddrMode)
11299 {
11300 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
11301 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m32, pIemCpu->iEffSeg);
11302 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m64, pIemCpu->iEffSeg);
11303 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11304 }
11305 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11306 }
11307 }
11308 IEMOP_MNEMONIC("lods rAX,Xv");
11309
11310 /*
11311 * Annoying double switch here.
11312 * Using ugly macro for implementing the cases, sharing it with lodsb.
11313 */
11314 switch (pIemCpu->enmEffOpSize)
11315 {
11316 case IEMMODE_16BIT:
11317 switch (pIemCpu->enmEffAddrMode)
11318 {
11319 case IEMMODE_16BIT: IEM_LODS_CASE(16, 16); break;
11320 case IEMMODE_32BIT: IEM_LODS_CASE(16, 32); break;
11321 case IEMMODE_64BIT: IEM_LODS_CASE(16, 64); break;
11322 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11323 }
11324 break;
11325
11326 case IEMMODE_32BIT:
11327 switch (pIemCpu->enmEffAddrMode)
11328 {
11329 case IEMMODE_16BIT: IEM_LODS_CASE(32, 16); break;
11330 case IEMMODE_32BIT: IEM_LODS_CASE(32, 32); break;
11331 case IEMMODE_64BIT: IEM_LODS_CASE(32, 64); break;
11332 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11333 }
11334 break;
11335
11336 case IEMMODE_64BIT:
11337 switch (pIemCpu->enmEffAddrMode)
11338 {
11339 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
11340 case IEMMODE_32BIT: IEM_LODS_CASE(64, 32); break;
11341 case IEMMODE_64BIT: IEM_LODS_CASE(64, 64); break;
11342 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11343 }
11344 break;
11345 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11346 }
11347 return VINF_SUCCESS;
11348}
11349
11350#undef IEM_LODS_CASE
11351
/** Macro used by iemOp_scasb_AL_Xb and iemOp_scaswd_eAX_Xv.
 *
 * Emits the IEM_MC block for one non-repeated SCAS variant: compares
 * [re]AX against the ValBits-wide value at ES:[re]DI via the cmp worker
 * (updating EFLAGS only), then advances or retreats rDI by ValBits/8
 * bytes according to EFLAGS.DF. */
#define IEM_SCAS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(3, 2); \
        IEM_MC_ARG(uint##ValBits##_t *, puRax, 0); \
        IEM_MC_ARG(uint##ValBits##_t, uValue, 1); \
        IEM_MC_ARG(uint32_t *, pEFlags, 2); \
        IEM_MC_LOCAL(RTGCPTR, uAddr); \
        \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue, X86_SREG_ES, uAddr); \
        IEM_MC_REF_GREG_U##ValBits(puRax, X86_GREG_xAX); \
        IEM_MC_REF_EFLAGS(pEFlags); \
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puRax, uValue, pEFlags); \
        \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END();
11373
/** Opcode 0xae. */
FNIEMOP_DEF(iemOp_scasb_AL_Xb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     * (Unlike STOS/LODS, REPE and REPNE have distinct workers here since
     * the prefix decides the loop termination condition.)
     */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
    {
        IEMOP_MNEMONIC("repe scasb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
    {
        IEMOP_MNEMONIC("repne scasb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("scasb al,Xb");

    /*
     * Sharing case implementation with scas[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_SCAS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_SCAS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_SCAS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
11418
11419
11420/** Opcode 0xaf. */
11421FNIEMOP_DEF(iemOp_scaswd_eAX_Xv)
11422{
11423 IEMOP_HLP_NO_LOCK_PREFIX();
11424
11425 /*
11426 * Use the C implementation if a repeat prefix is encountered.
11427 */
11428 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
11429 {
11430 IEMOP_MNEMONIC("repe scas rAX,Xv");
11431 switch (pIemCpu->enmEffOpSize)
11432 {
11433 case IEMMODE_16BIT:
11434 switch (pIemCpu->enmEffAddrMode)
11435 {
11436 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m16);
11437 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m32);
11438 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m64);
11439 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11440 }
11441 break;
11442 case IEMMODE_32BIT:
11443 switch (pIemCpu->enmEffAddrMode)
11444 {
11445 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m16);
11446 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m32);
11447 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m64);
11448 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11449 }
11450 case IEMMODE_64BIT:
11451 switch (pIemCpu->enmEffAddrMode)
11452 {
11453 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /** @todo It's this wrong, we can do 16-bit addressing in 64-bit mode, but not 32-bit. right? */
11454 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m32);
11455 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m64);
11456 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11457 }
11458 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11459 }
11460 }
11461 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
11462 {
11463 IEMOP_MNEMONIC("repne scas rAX,Xv");
11464 switch (pIemCpu->enmEffOpSize)
11465 {
11466 case IEMMODE_16BIT:
11467 switch (pIemCpu->enmEffAddrMode)
11468 {
11469 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m16);
11470 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m32);
11471 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m64);
11472 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11473 }
11474 break;
11475 case IEMMODE_32BIT:
11476 switch (pIemCpu->enmEffAddrMode)
11477 {
11478 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m16);
11479 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m32);
11480 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m64);
11481 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11482 }
11483 case IEMMODE_64BIT:
11484 switch (pIemCpu->enmEffAddrMode)
11485 {
11486 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
11487 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_rax_m32);
11488 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_rax_m64);
11489 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11490 }
11491 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11492 }
11493 }
11494 IEMOP_MNEMONIC("scas rAX,Xv");
11495
11496 /*
11497 * Annoying double switch here.
11498 * Using ugly macro for implementing the cases, sharing it with scasb.
11499 */
11500 switch (pIemCpu->enmEffOpSize)
11501 {
11502 case IEMMODE_16BIT:
11503 switch (pIemCpu->enmEffAddrMode)
11504 {
11505 case IEMMODE_16BIT: IEM_SCAS_CASE(16, 16); break;
11506 case IEMMODE_32BIT: IEM_SCAS_CASE(16, 32); break;
11507 case IEMMODE_64BIT: IEM_SCAS_CASE(16, 64); break;
11508 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11509 }
11510 break;
11511
11512 case IEMMODE_32BIT:
11513 switch (pIemCpu->enmEffAddrMode)
11514 {
11515 case IEMMODE_16BIT: IEM_SCAS_CASE(32, 16); break;
11516 case IEMMODE_32BIT: IEM_SCAS_CASE(32, 32); break;
11517 case IEMMODE_64BIT: IEM_SCAS_CASE(32, 64); break;
11518 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11519 }
11520 break;
11521
11522 case IEMMODE_64BIT:
11523 switch (pIemCpu->enmEffAddrMode)
11524 {
11525 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
11526 case IEMMODE_32BIT: IEM_SCAS_CASE(64, 32); break;
11527 case IEMMODE_64BIT: IEM_SCAS_CASE(64, 64); break;
11528 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11529 }
11530 break;
11531 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11532 }
11533 return VINF_SUCCESS;
11534}
11535
11536#undef IEM_SCAS_CASE
11537
11538/**
11539 * Common 'mov r8, imm8' helper.
11540 */
11541FNIEMOP_DEF_1(iemOpCommonMov_r8_Ib, uint8_t, iReg)
11542{
11543 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
11544 IEMOP_HLP_NO_LOCK_PREFIX();
11545
11546 IEM_MC_BEGIN(0, 1);
11547 IEM_MC_LOCAL_CONST(uint8_t, u8Value,/*=*/ u8Imm);
11548 IEM_MC_STORE_GREG_U8(iReg, u8Value);
11549 IEM_MC_ADVANCE_RIP();
11550 IEM_MC_END();
11551
11552 return VINF_SUCCESS;
11553}
11554
11555
/** Opcode 0xb0. - mov AL/R8B,Ib (REX.B selects R8B). */
FNIEMOP_DEF(iemOp_mov_AL_Ib)
{
    IEMOP_MNEMONIC("mov AL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xAX | pIemCpu->uRexB);
}
11562
11563
/** Opcode 0xb1. - mov CL/R9B,Ib (REX.B selects R9B).
 * @note Function name lacks the mov_ prefix used by iemOp_mov_AL_Ib -- kept
 *       as-is since it is referenced from the opcode dispatch table. */
FNIEMOP_DEF(iemOp_CL_Ib)
{
    IEMOP_MNEMONIC("mov CL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xCX | pIemCpu->uRexB);
}
11570
11571
/** Opcode 0xb2. - mov DL/R10B,Ib (REX.B selects R10B). */
FNIEMOP_DEF(iemOp_DL_Ib)
{
    IEMOP_MNEMONIC("mov DL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDX | pIemCpu->uRexB);
}
11578
11579
/** Opcode 0xb3. - mov BL/R11B,Ib (REX.B selects R11B). */
FNIEMOP_DEF(iemOp_BL_Ib)
{
    IEMOP_MNEMONIC("mov BL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBX | pIemCpu->uRexB);
}
11586
11587
/** Opcode 0xb4.
 * Register encoding 4 means AH without a REX prefix and SPL/R12B with one;
 * presumably the 8-bit GREG store resolves that mapping -- not visible here. */
FNIEMOP_DEF(iemOp_mov_AH_Ib)
{
    IEMOP_MNEMONIC("mov AH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSP | pIemCpu->uRexB);
}
11594
11595
/** Opcode 0xb5.
 * Register encoding 5 means CH without a REX prefix and BPL/R13B with one. */
FNIEMOP_DEF(iemOp_CH_Ib)
{
    IEMOP_MNEMONIC("mov CH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBP | pIemCpu->uRexB);
}
11602
11603
/** Opcode 0xb6.
 * Register encoding 6 means DH without a REX prefix and SIL/R14B with one. */
FNIEMOP_DEF(iemOp_DH_Ib)
{
    IEMOP_MNEMONIC("mov DH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSI | pIemCpu->uRexB);
}
11610
11611
/** Opcode 0xb7.
 * Register encoding 7 means BH without a REX prefix and DIL/R15B with one. */
FNIEMOP_DEF(iemOp_BH_Ib)
{
    IEMOP_MNEMONIC("mov BH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDI | pIemCpu->uRexB);
}
11618
11619
11620/**
11621 * Common 'mov regX,immX' helper.
11622 */
11623FNIEMOP_DEF_1(iemOpCommonMov_Rv_Iv, uint8_t, iReg)
11624{
11625 switch (pIemCpu->enmEffOpSize)
11626 {
11627 case IEMMODE_16BIT:
11628 {
11629 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
11630 IEMOP_HLP_NO_LOCK_PREFIX();
11631
11632 IEM_MC_BEGIN(0, 1);
11633 IEM_MC_LOCAL_CONST(uint16_t, u16Value,/*=*/ u16Imm);
11634 IEM_MC_STORE_GREG_U16(iReg, u16Value);
11635 IEM_MC_ADVANCE_RIP();
11636 IEM_MC_END();
11637 break;
11638 }
11639
11640 case IEMMODE_32BIT:
11641 {
11642 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
11643 IEMOP_HLP_NO_LOCK_PREFIX();
11644
11645 IEM_MC_BEGIN(0, 1);
11646 IEM_MC_LOCAL_CONST(uint32_t, u32Value,/*=*/ u32Imm);
11647 IEM_MC_STORE_GREG_U32(iReg, u32Value);
11648 IEM_MC_ADVANCE_RIP();
11649 IEM_MC_END();
11650 break;
11651 }
11652 case IEMMODE_64BIT:
11653 {
11654 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm); /* 64-bit immediate! */
11655 IEMOP_HLP_NO_LOCK_PREFIX();
11656
11657 IEM_MC_BEGIN(0, 1);
11658 IEM_MC_LOCAL_CONST(uint64_t, u64Value,/*=*/ u64Imm);
11659 IEM_MC_STORE_GREG_U64(iReg, u64Value);
11660 IEM_MC_ADVANCE_RIP();
11661 IEM_MC_END();
11662 break;
11663 }
11664 }
11665
11666 return VINF_SUCCESS;
11667}
11668
11669
/** Opcode 0xb8. - mov rAX/R8,Iv (REX.B selects R8). */
FNIEMOP_DEF(iemOp_eAX_Iv)
{
    IEMOP_MNEMONIC("mov rAX,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xAX | pIemCpu->uRexB);
}
11676
11677
/** Opcode 0xb9. - mov rCX/R9,Iv (REX.B selects R9). */
FNIEMOP_DEF(iemOp_eCX_Iv)
{
    IEMOP_MNEMONIC("mov rCX,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xCX | pIemCpu->uRexB);
}
11684
11685
/** Opcode 0xba. - mov rDX/R10,Iv (REX.B selects R10). */
FNIEMOP_DEF(iemOp_eDX_Iv)
{
    IEMOP_MNEMONIC("mov rDX,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDX | pIemCpu->uRexB);
}
11692
11693
/** Opcode 0xbb. - mov rBX/R11,Iv (REX.B selects R11). */
FNIEMOP_DEF(iemOp_eBX_Iv)
{
    IEMOP_MNEMONIC("mov rBX,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBX | pIemCpu->uRexB);
}
11700
11701
/** Opcode 0xbc. - mov rSP/R12,Iv (REX.B selects R12). */
FNIEMOP_DEF(iemOp_eSP_Iv)
{
    IEMOP_MNEMONIC("mov rSP,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSP | pIemCpu->uRexB);
}
11708
11709
/** Opcode 0xbd. - mov rBP/R13,Iv (REX.B selects R13). */
FNIEMOP_DEF(iemOp_eBP_Iv)
{
    IEMOP_MNEMONIC("mov rBP,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBP | pIemCpu->uRexB);
}
11716
11717
/** Opcode 0xbe. - mov rSI/R14,Iv (REX.B selects R14). */
FNIEMOP_DEF(iemOp_eSI_Iv)
{
    IEMOP_MNEMONIC("mov rSI,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSI | pIemCpu->uRexB);
}
11724
11725
/** Opcode 0xbf. - mov rDI/R15,Iv (REX.B selects R15). */
FNIEMOP_DEF(iemOp_eDI_Iv)
{
    IEMOP_MNEMONIC("mov rDI,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDI | pIemCpu->uRexB);
}
11732
11733
/** Opcode 0xc0. - Group 2: shift/rotate Eb by an imm8 count. */
FNIEMOP_DEF(iemOp_Grp2_Eb_Ib)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    /* The ModR/M reg field selects the operation; /6 is undefined. */
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,Ib"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,Ib"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,Ib"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,Ib"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,Ib"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,Ib"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,Ib"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
    }
    /* OF/AF are left undefined by these operations (for some counts),
       so exclude them when verifying flag state. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t, cShiftArg, 1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        /* Note: the imm8 count is fetched AFTER the effective address
           (1 byte of displacement-trailing immediate). */
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEM_MC_ASSIGN(cShiftArg, cShift);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
11792
11793
/** Opcode 0xc1. - Group 2: shift/rotate Ev by an imm8 count. */
FNIEMOP_DEF(iemOp_Grp2_Ev_Ib)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    /* The ModR/M reg field selects the operation; /6 is undefined. */
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,Ib"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,Ib"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,Ib"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,Ib"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,Ib"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,Ib"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,Ib"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
    }
    /* OF/AF are left undefined by these operations (for some counts),
       so exclude them when verifying flag state. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEMOP_HLP_NO_LOCK_PREFIX();
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
                /* 32-bit result written via reference: explicitly clear bits 63:32. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* The imm8 count trails the ModR/M bytes (1-byte immediate hint). */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
11930
11931
/** Opcode 0xc2. - near return, popping Iw extra bytes off the stack. */
FNIEMOP_DEF(iemOp_retn_Iw)
{
    IEMOP_MNEMONIC("retn Iw");
    uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Near branches default to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, u16Imm);
}
11941
11942
/** Opcode 0xc3. - near return; same worker as 0xc2 with zero extra bytes. */
FNIEMOP_DEF(iemOp_retn)
{
    IEMOP_MNEMONIC("retn");
    /* Near branches default to 64-bit operand size in long mode. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, 0);
}
11951
11952
/** Opcode 0xc4. - LES, doubling as the 2-byte VEX prefix escape. */
FNIEMOP_DEF(iemOp_les_Gv_Mp)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
        || (bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_MNEMONIC("2-byte-vex");
        /* The LES instruction is invalid in 64-bit mode. In legacy and
           compatibility mode it is invalid with MOD=3.
           The use as a VEX prefix is made possible by assigning the inverted
           REX.R to the top MOD bit, and the top bit in the inverted register
           specifier to the bottom MOD bit, thereby effectively limiting 32-bit
           to accessing registers 0..7 in this VEX form. */
        /** @todo VEX: Just use new tables for it. */
        return IEMOP_RAISE_INVALID_OPCODE();
    }
    IEMOP_MNEMONIC("les Gv,Mp");
    /* Loads ES and a general register from the far pointer at Mp. */
    return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_ES, bRm);
}
11973
11974
/** Opcode 0xc5. */
FNIEMOP_DEF(iemOp_lds_Gv_Mp)
{
    /* LDS Gv,Mp - load DS:reg from a far pointer in memory.  The same
       opcode doubles as the 3-byte VEX prefix (see comment below), which
       this decoder does not implement yet and thus raises #UD for. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
        || (bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_MNEMONIC("3-byte-vex");
        /* The LDS instruction is invalid 64-bit mode. In legacy and
           compatability mode it is invalid with MOD=3.
           The use as a VEX prefix is made possible by assigning the inverted
           REX.R and REX.X to the two MOD bits, since the REX bits are ignored
           outside of 64-bit mode. */
        /** @todo VEX: Just use new tables for it. */
        return IEMOP_RAISE_INVALID_OPCODE();
    }
    IEMOP_MNEMONIC("lds Gv,Mp");
    return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_DS, bRm);
}
11994
11995
/** Opcode 0xc6. */
FNIEMOP_DEF(iemOp_Grp11_Eb_Ib)
{
    /* Group 11: only /0 (mov Eb,Ib) is defined; all other reg values #UD. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Eb,Ib in this group. */
        return IEMOP_RAISE_INVALID_OPCODE();
    IEMOP_MNEMONIC("mov Eb,Ib");

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEM_MC_BEGIN(0, 0);
        IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Imm);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory access. */
        /* Note: effective address is calculated before the immediate is
           fetched (cbImm=1 accounts for the trailing immediate byte). */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Imm);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
12027
12028
/** Opcode 0xc7. */
FNIEMOP_DEF(iemOp_Grp11_Ev_Iz)
{
    /* Group 11: only /0 (mov Ev,Iz) is defined; all other reg values #UD.
       In 64-bit operand size the immediate is a sign-extended 32-bit value. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Ev,Iz in this group. */
        return IEMOP_RAISE_INVALID_OPCODE();
    IEMOP_MNEMONIC("mov Ev,Iz");

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 0);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 0);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 0);
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); /* imm32 sign-extended to 64 bits */
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory access. */
        /* The cbImm argument to IEM_MC_CALC_RM_EFF_ADDR (2 or 4) is the size
           of the immediate that still follows the ModR/M bytes. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4); /* still a 4-byte immediate in 64-bit mode */
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
12109
12110
12111
12112
/** Opcode 0xc8. */
FNIEMOP_DEF(iemOp_enter_Iw_Ib)
{
    /* ENTER Iw,Ib - set up a stack frame of cbFrame bytes with
       u8NestingLevel display copies; deferred to iemCImpl_enter. */
    IEMOP_MNEMONIC("enter Iw,Ib");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* ENTER defaults to 64-bit operand size in long mode */
    IEMOP_HLP_NO_LOCK_PREFIX();
    uint16_t cbFrame;        IEM_OPCODE_GET_NEXT_U16(&cbFrame);
    uint8_t u8NestingLevel;  IEM_OPCODE_GET_NEXT_U8(&u8NestingLevel);
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_enter, pIemCpu->enmEffOpSize, cbFrame, u8NestingLevel);
}
12123
12124
12125/** Opcode 0xc9. */
12126FNIEMOP_DEF(iemOp_leave)
12127{
12128 IEMOP_MNEMONIC("retn");
12129 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
12130 IEMOP_HLP_NO_LOCK_PREFIX();
12131 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_leave, pIemCpu->enmEffOpSize);
12132}
12133
12134
/** Opcode 0xca. */
FNIEMOP_DEF(iemOp_retf_Iw)
{
    /* Far return, popping an additional Iw bytes of arguments off the
       stack after CS:IP; deferred to iemCImpl_retf. */
    IEMOP_MNEMONIC("retf Iw");
    uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* far RET defaults to 64-bit operand size in long mode */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, u16Imm);
}
12144
12145
/** Opcode 0xcb. */
FNIEMOP_DEF(iemOp_retf)
{
    /* Plain far return; same as 0xca but with zero bytes to pop. */
    IEMOP_MNEMONIC("retf");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* far RET defaults to 64-bit operand size in long mode */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, 0);
}
12154
12155
/** Opcode 0xcc. */
FNIEMOP_DEF(iemOp_int_3)
{
    /* INT3 breakpoint instruction - raises #BP; the fIsBpInstr flag lets
       iemCImpl_int distinguish it from a software "int 3" (0xcd 0x03). */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, X86_XCPT_BP, true /*fIsBpInstr*/);
}
12161
12162
/** Opcode 0xcd. */
FNIEMOP_DEF(iemOp_int_Ib)
{
    /* INT Ib - software interrupt with the vector given by the immediate. */
    uint8_t u8Int; IEM_OPCODE_GET_NEXT_U8(&u8Int);
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, u8Int, false /*fIsBpInstr*/);
}
12169
12170
12171/** Opcode 0xce. */
12172FNIEMOP_DEF(iemOp_into)
12173{
12174 IEM_MC_BEGIN(2, 0);
12175 IEM_MC_ARG_CONST(uint8_t, u8Int, /*=*/ X86_XCPT_OF, 0);
12176 IEM_MC_ARG_CONST(bool, fIsBpInstr, /*=*/ false, 1);
12177 IEM_MC_CALL_CIMPL_2(iemCImpl_int, u8Int, fIsBpInstr);
12178 IEM_MC_END();
12179 return VINF_SUCCESS;
12180}
12181
12182
/** Opcode 0xcf. */
FNIEMOP_DEF(iemOp_iret)
{
    /* IRET - interrupt return; all the heavy lifting (mode/privilege
       transitions) is in iemCImpl_iret. */
    IEMOP_MNEMONIC("iret");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_iret, pIemCpu->enmEffOpSize);
}
12190
12191
/** Opcode 0xd0. */
FNIEMOP_DEF(iemOp_Grp2_Eb_1)
{
    /* Group 2 rotate/shift on a byte operand with a fixed count of 1.
       The ModR/M reg field selects the operation; /6 is undefined (#UD). */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,1"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,1"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,1"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,1"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,1"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,1"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,1"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
    }
    /* OF and AF are left undefined by (some of) these operations on real CPUs. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,       pu8Dst,             0);
        IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=*/1,   1);
        IEM_MC_ARG(uint32_t *,      pEFlags,            2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory */
        /* Read-modify-write: map the byte, apply the op, then commit. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,       pu8Dst,             0);
        IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=*/1,   1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags,        2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
12247
12248
12249
/** Opcode 0xd1. */
FNIEMOP_DEF(iemOp_Grp2_Ev_1)
{
    /* Group 2 rotate/shift on a word/dword/qword operand with a fixed count
       of 1.  The ModR/M reg field selects the operation; /6 is #UD. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,1"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,1"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,1"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,1"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,1"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,1"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,1"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
    }
    /* OF and AF are left undefined by (some of) these operations on real CPUs. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        IEMOP_HLP_NO_LOCK_PREFIX();
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *,      pu16Dst,           0);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG(uint32_t *,      pEFlags,           2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *,      pu32Dst,           0);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG(uint32_t *,      pEFlags,           2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); /* 32-bit writes zero the upper half in 64-bit mode */
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *,      pu64Dst,           0);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG(uint32_t *,      pEFlags,           2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory */
        /* Read-modify-write: map the operand, apply the op, then commit. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *,      pu16Dst,            0);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=1*/1,  1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags,        2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *,      pu32Dst,            0);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=1*/1,  1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags,        2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *,      pu64Dst,            0);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=1*/1,  1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags,        2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
12379
12380
/** Opcode 0xd2. */
FNIEMOP_DEF(iemOp_Grp2_Eb_CL)
{
    /* Group 2 rotate/shift on a byte operand with the count taken from CL.
       The ModR/M reg field selects the operation; /6 is undefined (#UD). */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,CL"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,CL"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,CL"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,CL"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,CL"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,CL"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,CL"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc, grr. */
    }
    /* OF and AF are left undefined by (some of) these operations on real CPUs. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,   pu8Dst,     0);
        IEM_MC_ARG(uint8_t,     cShiftArg,  1);
        IEM_MC_ARG(uint32_t *,  pEFlags,    2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count comes from CL */
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory */
        /* Read-modify-write: map the byte, apply the op, then commit. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,   pu8Dst,          0);
        IEM_MC_ARG(uint8_t,     cShiftArg,       1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count comes from CL */
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
12438
12439
/** Opcode 0xd3. */
FNIEMOP_DEF(iemOp_Grp2_Ev_CL)
{
    /* Group 2 rotate/shift on a word/dword/qword operand with the count
       taken from CL.  The ModR/M reg field selects the operation; /6 is #UD. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,CL"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,CL"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,CL"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,CL"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,CL"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,CL"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,CL"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
    }
    /* OF and AF are left undefined by (some of) these operations on real CPUs. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        IEMOP_HLP_NO_LOCK_PREFIX();
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *,  pu16Dst,    0);
                IEM_MC_ARG(uint8_t,     cShiftArg,  1);
                IEM_MC_ARG(uint32_t *,  pEFlags,    2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count comes from CL */
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *,  pu32Dst,    0);
                IEM_MC_ARG(uint8_t,     cShiftArg,  1);
                IEM_MC_ARG(uint32_t *,  pEFlags,    2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count comes from CL */
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); /* 32-bit writes zero the upper half in 64-bit mode */
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *,  pu64Dst,    0);
                IEM_MC_ARG(uint8_t,     cShiftArg,  1);
                IEM_MC_ARG(uint32_t *,  pEFlags,    2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count comes from CL */
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory */
        /* Read-modify-write: map the operand, apply the op, then commit. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *,  pu16Dst,         0);
                IEM_MC_ARG(uint8_t,     cShiftArg,       1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count comes from CL */
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *,  pu32Dst,         0);
                IEM_MC_ARG(uint8_t,     cShiftArg,       1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count comes from CL */
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *,  pu64Dst,         0);
                IEM_MC_ARG(uint8_t,     cShiftArg,       1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count comes from CL */
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
12575
/** Opcode 0xd4. */
FNIEMOP_DEF(iemOp_aam_Ib)
{
    /* AAM Ib - ASCII adjust AX after multiply; divides AL by the immediate
       base.  A zero base raises #DE, matching the hardware behavior. */
    IEMOP_MNEMONIC("aam Ib");
    uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT(); /* invalid opcode in 64-bit mode */
    if (!bImm)
        return IEMOP_RAISE_DIVIDE_ERROR();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_aam, bImm);
}
12587
12588
/** Opcode 0xd5. */
FNIEMOP_DEF(iemOp_aad_Ib)
{
    /* AAD Ib - ASCII adjust AX before division with the immediate base.
       Unlike AAM, a zero immediate does not fault. */
    IEMOP_MNEMONIC("aad Ib");
    uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT(); /* invalid opcode in 64-bit mode */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_aad, bImm);
}
12598
12599
/** Opcode 0xd7. */
FNIEMOP_DEF(iemOp_xlat)
{
    /* XLAT - AL = [xBX + zero-extended AL], using the effective segment
       (DS unless overridden).  One variant per effective address size. */
    IEMOP_MNEMONIC("xlat");
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t,  u8Tmp);
            IEM_MC_LOCAL(uint16_t, u16Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U16(u16Addr, X86_GREG_xAX); /* index = zero-extended AL */
            IEM_MC_ADD_GREG_U16_TO_LOCAL(u16Addr, X86_GREG_xBX);
            IEM_MC_FETCH_MEM16_U8(u8Tmp, pIemCpu->iEffSeg, u16Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t,  u8Tmp);
            IEM_MC_LOCAL(uint32_t, u32Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U32(u32Addr, X86_GREG_xAX); /* index = zero-extended AL */
            IEM_MC_ADD_GREG_U32_TO_LOCAL(u32Addr, X86_GREG_xBX);
            IEM_MC_FETCH_MEM32_U8(u8Tmp, pIemCpu->iEffSeg, u32Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t,  u8Tmp);
            IEM_MC_LOCAL(uint64_t, u64Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U64(u64Addr, X86_GREG_xAX); /* index = zero-extended AL */
            IEM_MC_ADD_GREG_U64_TO_LOCAL(u64Addr, X86_GREG_xBX);
            IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, u64Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
12646
12647
/**
 * Common worker for FPU instructions working on ST0 and STn, and storing the
 * result in ST0.
 *
 * @param   bRm         The ModR/M byte; the R/M field selects STn.
 * @param   pfnAImpl    Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_2(iemOpHlpFpu_st0_stN, uint8_t, bRm, PFNIEMAIMPLFPUR80, pfnAImpl)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(3, 1);
    IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,        FpuRes,     0);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,                 1);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,                 2);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    /* Only perform the operation if both ST0 and STn hold values; an empty
       register means stack underflow instead. */
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
        IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
        IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
12678
12679
/**
 * Common worker for FPU instructions working on ST0 and STn, and only affecting
 * flags.
 *
 * @param   bRm         The ModR/M byte; the R/M field selects STn.
 * @param   pfnAImpl    Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_2(iemOpHlpFpuNoStore_st0_stN, uint8_t, bRm, PFNIEMAIMPLFPUR80FSW, pfnAImpl)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(3, 1);
    IEM_MC_LOCAL(uint16_t,              u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,    pu16Fsw,        u16Fsw,     0);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,                 1);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,                 2);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    /* Only FSW is updated; no register is written (compare-style ops). */
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
        IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pu16Fsw, pr80Value1, pr80Value2);
        IEM_MC_UPDATE_FSW(u16Fsw);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(UINT8_MAX); /* UINT8_MAX = no destination register */
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
12710
12711
/**
 * Common worker for FPU instructions working on ST0 and STn, only affecting
 * flags, and popping when done.
 *
 * @param   bRm         The ModR/M byte; the R/M field selects STn.
 * @param   pfnAImpl    Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_2(iemOpHlpFpuNoStore_st0_stN_pop, uint8_t, bRm, PFNIEMAIMPLFPUR80FSW, pfnAImpl)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(3, 1);
    IEM_MC_LOCAL(uint16_t,              u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,    pu16Fsw,        u16Fsw,     0);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,                 1);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,                 2);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    /* Same as iemOpHlpFpuNoStore_st0_stN, but the stack is popped afterwards
       (the *_THEN_POP variants). */
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
        IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pu16Fsw, pr80Value1, pr80Value2);
        IEM_MC_UPDATE_FSW_THEN_POP(u16Fsw);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(UINT8_MAX); /* UINT8_MAX = no destination register */
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
12742
12743
/** Opcode 0xd8 11/0. */
FNIEMOP_DEF_1(iemOp_fadd_stN, uint8_t, bRm)
{
    /* FADD ST0,STn - ST0 += STn, result stored in ST0. */
    IEMOP_MNEMONIC("fadd st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fadd_r80_by_r80);
}
12750
12751
/** Opcode 0xd8 11/1. */
FNIEMOP_DEF_1(iemOp_fmul_stN, uint8_t, bRm)
{
    /* FMUL ST0,STn - ST0 *= STn, result stored in ST0. */
    IEMOP_MNEMONIC("fmul st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fmul_r80_by_r80);
}
12758
12759
/** Opcode 0xd8 11/2. */
FNIEMOP_DEF_1(iemOp_fcom_stN, uint8_t, bRm)
{
    /* FCOM ST0,STn - compare, setting FSW condition codes only. */
    IEMOP_MNEMONIC("fcom st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpuNoStore_st0_stN, bRm, iemAImpl_fcom_r80_by_r80);
}
12766
12767
/** Opcode 0xd8 11/3. */
FNIEMOP_DEF_1(iemOp_fcomp_stN, uint8_t, bRm)
{
    /* FCOMP ST0,STn - same comparison as FCOM, then pop the stack;
       reuses the FCOM assembly worker via the popping helper. */
    IEMOP_MNEMONIC("fcomp st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpuNoStore_st0_stN_pop, bRm, iemAImpl_fcom_r80_by_r80);
}
12774
12775
/** Opcode 0xd8 11/4. */
FNIEMOP_DEF_1(iemOp_fsub_stN, uint8_t, bRm)
{
    /* FSUB ST0,STn - ST0 -= STn, result stored in ST0. */
    IEMOP_MNEMONIC("fsub st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fsub_r80_by_r80);
}
12782
12783
/** Opcode 0xd8 11/5. */
FNIEMOP_DEF_1(iemOp_fsubr_stN, uint8_t, bRm)
{
    /* FSUBR ST0,STn - reversed subtract: ST0 = STn - ST0. */
    IEMOP_MNEMONIC("fsubr st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fsubr_r80_by_r80);
}
12790
12791
/** Opcode 0xd8 11/6. */
FNIEMOP_DEF_1(iemOp_fdiv_stN, uint8_t, bRm)
{
    /* FDIV ST0,STn - ST0 /= STn, result stored in ST0. */
    IEMOP_MNEMONIC("fdiv st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fdiv_r80_by_r80);
}
12798
12799
/** Opcode 0xd8 11/7. */
FNIEMOP_DEF_1(iemOp_fdivr_stN, uint8_t, bRm)
{
    /* FDIVR ST0,STn - reversed divide: ST0 = STn / ST0. */
    IEMOP_MNEMONIC("fdivr st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fdivr_r80_by_r80);
}
12806
12807
/**
 * Common worker for FPU instructions working on ST0 and an m32r, and storing
 * the result in ST0.
 *
 * @param   bRm         The ModR/M byte; decoded for the memory operand.
 * @param   pfnAImpl    Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_2(iemOpHlpFpu_st0_m32r, uint8_t, bRm, PFNIEMAIMPLFPUR32, pfnAImpl)
{
    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
    IEM_MC_LOCAL(RTFLOAT32U,            r32Val2);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,        FpuRes,     0);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,                 1);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U,  pr32Val2,       r32Val2,    2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    /* The 32-bit real operand is fetched before checking ST0, so memory
       faults are delivered ahead of any FPU stack underflow handling. */
    IEM_MC_FETCH_MEM_R32(r32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr32Val2);
        IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
12843
12844
/** Opcode 0xd8 !11/0. */
FNIEMOP_DEF_1(iemOp_fadd_m32r, uint8_t, bRm)
{
    /* FADD ST0,m32r - add a 32-bit real memory operand to ST0. */
    IEMOP_MNEMONIC("fadd st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fadd_r80_by_r32);
}
12851
12852
/** Opcode 0xd8 !11/1. */
FNIEMOP_DEF_1(iemOp_fmul_m32r, uint8_t, bRm)
{
    /* FMUL ST0,m32r - multiply ST0 by a 32-bit real memory operand. */
    IEMOP_MNEMONIC("fmul st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fmul_r80_by_r32);
}
12859
12860
/** Opcode 0xd8 !11/2. */
FNIEMOP_DEF_1(iemOp_fcom_m32r, uint8_t, bRm)
{
    /* FCOM ST0,m32r - compare ST0 with a 32-bit real memory operand,
       updating only the FSW condition codes (no register written). */
    IEMOP_MNEMONIC("fcom st0,m32r");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t,              u16Fsw);
    IEM_MC_LOCAL(RTFLOAT32U,            r32Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,    pu16Fsw,        u16Fsw,     0);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,                 1);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U,  pr32Val2,       r32Val2,    2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R32(r32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fcom_r80_by_r32, pu16Fsw, pr80Value1, pr32Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
12893
12894
/** Opcode 0xd8 !11/3.
 * FCOMP ST(0),m32real - like FCOM but pops the register stack afterwards
 * (note the _THEN_POP variants in both branches). */
FNIEMOP_DEF_1(iemOp_fcomp_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcomp st0,m32r");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_LOCAL(RTFLOAT32U, r32Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U, pr32Val2, r32Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R32(r32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fcom_r80_by_r32, pu16Fsw, pr80Value1, pr32Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
12927
12928
/** Opcode 0xd8 !11/4.
 * FSUB ST(0),m32real - subtract a 32-bit real in memory from ST(0). */
FNIEMOP_DEF_1(iemOp_fsub_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsub st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fsub_r80_by_r32);
}
12935
12936
/** Opcode 0xd8 !11/5.
 * FSUBR ST(0),m32real - reversed subtract (m32real minus ST(0)). */
FNIEMOP_DEF_1(iemOp_fsubr_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubr st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fsubr_r80_by_r32);
}
12943
12944
/** Opcode 0xd8 !11/6.
 * FDIV ST(0),m32real - divide ST(0) by a 32-bit real from memory. */
FNIEMOP_DEF_1(iemOp_fdiv_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdiv st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fdiv_r80_by_r32);
}
12951
12952
/** Opcode 0xd8 !11/7.
 * FDIVR ST(0),m32real - reversed divide (m32real divided by ST(0)). */
FNIEMOP_DEF_1(iemOp_fdivr_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivr st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fdivr_r80_by_r32);
}
12959
12960
/** Opcode 0xd8.
 * First FPU escape opcode: dispatches on ModRM.reg and on whether the ModRM
 * byte encodes a register (mod == 3) or memory operand. */
FNIEMOP_DEF(iemOp_EscF0)
{
    /* Record the offset of the escape opcode byte (offOpcode has already
       been advanced past it, hence the -1). */
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register form: operate on ST(0) and ST(i). */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fadd_stN,  bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fmul_stN,  bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcom_stN,  bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fcomp_stN, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fsub_stN,  bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fsubr_stN, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fdiv_stN,  bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fdivr_stN, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory form: operate on ST(0) and a 32-bit real memory operand. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fadd_m32r,  bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fmul_m32r,  bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcom_m32r,  bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fcomp_m32r, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fsub_m32r,  bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fsubr_m32r, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fdiv_m32r,  bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fdivr_m32r, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
12998
12999
/** Opcode 0xd9 /0 mem32real
 * FLD m32real - converts a 32-bit real from memory to 80-bit and pushes it.
 * @sa iemOp_fld_m64r */
FNIEMOP_DEF_1(iemOp_fld_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fld m32r");

    IEM_MC_BEGIN(2, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_LOCAL(RTFLOAT32U, r32Val);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U, pr32Val, r32Val, 1);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R32(r32Val, pIemCpu->iEffSeg, GCPtrEffSrc);

    /* Register 7 (relative to the current top) is the slot the push lands in. */
    IEM_MC_IF_FPUREG_IS_EMPTY(7)
        IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fld_r32_to_r80, pFpuRes, pr32Val);
        IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13032
13033
/** Opcode 0xd9 !11/2 mem32real
 * FST m32real - stores ST(0) to memory as a 32-bit real. */
FNIEMOP_DEF_1(iemOp_fst_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fst m32r");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PRTFLOAT32U, pr32Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Map the destination for writing; the commit is conditional on the FSW
       result of the conversion. */
    IEM_MC_MEM_MAP(pr32Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r32, pu16Fsw, pr32Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr32Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* ST(0) empty: with IM masked a negative QNaN is stored, then the
           underflow is recorded. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(pr32Dst);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pr32Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13068
13069
/** Opcode 0xd9 !11/3
 * FSTP m32real - like FST m32real but pops the stack afterwards (see the
 * _THEN_POP variants in both branches). */
FNIEMOP_DEF_1(iemOp_fstp_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fstp m32r");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PRTFLOAT32U, pr32Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pr32Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r32, pu16Fsw, pr32Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr32Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(pr32Dst);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pr32Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13104
13105
/** Opcode 0xd9 !11/4
 * FLDENV m14/28byte - loads the FPU environment; the heavy lifting is done
 * by iemCImpl_fldenv (operand size picks the 14 vs 28 byte format). */
FNIEMOP_DEF_1(iemOp_fldenv, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fldenv m14/28byte");
    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pIemCpu->iEffSeg, 1);
    IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_CALL_CIMPL_3(iemCImpl_fldenv, enmEffOpSize, iEffSeg, GCPtrEffSrc);
    IEM_MC_END();
    return VINF_SUCCESS;
}
13121
13122
13123/** Opcode 0xd9 !11/5 */
13124FNIEMOP_DEF_1(iemOp_fldcw, uint8_t, bRm)
13125{
13126 IEMOP_MNEMONIC("fldcw m2byte");
13127 IEM_MC_BEGIN(1, 1);
13128 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
13129 IEM_MC_ARG(uint16_t, u16Fsw, 0);
13130 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
13131 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13132 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13133 IEM_MC_FETCH_MEM_U16(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
13134 IEM_MC_CALL_CIMPL_1(iemCImpl_fldcw, u16Fsw);
13135 IEM_MC_END();
13136 return VINF_SUCCESS;
13137}
13138
13139
13140/** Opcode 0xd9 !11/6 */
13141FNIEMOP_DEF_1(iemOp_fnstenv, uint8_t, bRm)
13142{
13143 IEMOP_MNEMONIC("fstenv m14/m28byte");
13144 IEM_MC_BEGIN(3, 0);
13145 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0);
13146 IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pIemCpu->iEffSeg, 1);
13147 IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 2);
13148 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
13149 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13150 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13151 IEM_MC_CALL_CIMPL_3(iemCImpl_fnstenv, enmEffOpSize, iEffSeg, GCPtrEffDst);
13152 IEM_MC_END();
13153 return VINF_SUCCESS;
13154}
13155
13156
13157/** Opcode 0xd9 !11/7 */
13158FNIEMOP_DEF_1(iemOp_fnstcw, uint8_t, bRm)
13159{
13160 IEMOP_MNEMONIC("fnstcw m2byte");
13161 IEM_MC_BEGIN(2, 0);
13162 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
13163 IEM_MC_LOCAL(uint16_t, u16Fcw);
13164 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
13165 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13166 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13167 IEM_MC_FETCH_FCW(u16Fcw);
13168 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Fcw);
13169 IEM_MC_ADVANCE_RIP(); /* C0-C3 are documented as undefined, we leave them unmodified. */
13170 IEM_MC_END();
13171 return VINF_SUCCESS;
13172}
13173
13174
/** Opcode 0xd9 0xc9, 0xd9 0xd8-0xdf, ++?.
 * FNOP - does nothing except update the FPU opcode/IP tracking. */
FNIEMOP_DEF(iemOp_fnop)
{
    IEMOP_MNEMONIC("fnop");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    /** @todo Testcase: looks like FNOP leaves FOP alone but updates FPUIP. Could be
     *        intel optimizations. Investigate. */
    IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP(); /* C0-C3 are documented as undefined, we leave them unmodified. */
    IEM_MC_END();
    return VINF_SUCCESS;
}
13192
13193
/** Opcode 0xd9 11/0 stN
 * FLD ST(i) - pushes a copy of ST(i) onto the stack. */
FNIEMOP_DEF_1(iemOp_fld_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fld stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    /** @todo Testcase: Check if this raises \#MF? Intel mentioned it not. AMD
     *        indicates that it does. */
    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80Value);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, bRm & X86_MODRM_RM_MASK)
        IEM_MC_SET_FPU_RESULT(FpuRes, 0 /*FSW*/, pr80Value);
        IEM_MC_PUSH_FPU_RESULT(FpuRes);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_UNDERFLOW();
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();

    return VINF_SUCCESS;
}
13219
13220
/** Opcode 0xd9 11/3 stN
 * FXCH ST(i) - exchanges ST(0) and ST(i); sets C1 in FSW on success. */
FNIEMOP_DEF_1(iemOp_fxch_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fxch stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    /** @todo Testcase: Check if this raises \#MF? Intel mentioned it not. AMD
     *        indicates that it does. */
    IEM_MC_BEGIN(1, 3);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80Value1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80Value2);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_ARG_CONST(uint8_t, iStReg, /*=*/ bRm & X86_MODRM_RM_MASK, 0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
        /* Swap: ST(i) gets the old ST(0), and the FPU result (old ST(i)
           with C1 set) replaces ST(0). */
        IEM_MC_SET_FPU_RESULT(FpuRes, X86_FSW_C1, pr80Value2);
        IEM_MC_STORE_FPUREG_R80_SRC_REF(bRm & X86_MODRM_RM_MASK, pr80Value1);
        IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
    IEM_MC_ELSE()
        /* Underflow handling is complex enough to warrant a C implementation. */
        IEM_MC_CALL_CIMPL_1(iemCImpl_fxch_underflow, iStReg);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();

    return VINF_SUCCESS;
}
13249
13250
/** Opcode 0xd9 11/4, 0xdd 11/2.
 * FSTP ST(i) - copies ST(0) to ST(i) and pops the stack. */
FNIEMOP_DEF_1(iemOp_fstp_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fstp st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    /* fstp st0, st0 is frequently used as an official 'ffreep st0' sequence,
       so the iDstReg == 0 case skips the store and just pops. */
    uint8_t const iDstReg = bRm & X86_MODRM_RM_MASK;
    if (!iDstReg)
    {
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL_CONST(uint16_t, u16Fsw, /*=*/ 0);
        IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
        IEM_MC_MAYBE_RAISE_FPU_XCPT();
        IEM_MC_IF_FPUREG_NOT_EMPTY(0)
            IEM_MC_UPDATE_FSW_THEN_POP(u16Fsw);
        IEM_MC_ELSE()
            IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(0);
        IEM_MC_ENDIF();
        IEM_MC_USED_FPU();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(PCRTFLOAT80U, pr80Value);
        IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
        IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
        IEM_MC_MAYBE_RAISE_FPU_XCPT();
        IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
            IEM_MC_SET_FPU_RESULT(FpuRes, 0 /*FSW*/, pr80Value);
            IEM_MC_STORE_FPU_RESULT_THEN_POP(FpuRes, iDstReg);
        IEM_MC_ELSE()
            IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(iDstReg);
        IEM_MC_ENDIF();
        IEM_MC_USED_FPU();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
13293
13294
13295/**
13296 * Common worker for FPU instructions working on ST0 and replaces it with the
13297 * result, i.e. unary operators.
13298 *
13299 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13300 */
13301FNIEMOP_DEF_1(iemOpHlpFpu_st0, PFNIEMAIMPLFPUR80UNARY, pfnAImpl)
13302{
13303 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13304
13305 IEM_MC_BEGIN(2, 1);
13306 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
13307 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
13308 IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 1);
13309
13310 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13311 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13312 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
13313 IEM_MC_CALL_FPU_AIMPL_2(pfnAImpl, pFpuRes, pr80Value);
13314 IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
13315 IEM_MC_ELSE()
13316 IEM_MC_FPU_STACK_UNDERFLOW(0);
13317 IEM_MC_ENDIF();
13318 IEM_MC_USED_FPU();
13319 IEM_MC_ADVANCE_RIP();
13320
13321 IEM_MC_END();
13322 return VINF_SUCCESS;
13323}
13324
13325
/** Opcode 0xd9 0xe0.
 * FCHS - change the sign of ST(0). */
FNIEMOP_DEF(iemOp_fchs)
{
    IEMOP_MNEMONIC("fchs st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fchs_r80);
}
13332
13333
/** Opcode 0xd9 0xe1.
 * FABS - absolute value of ST(0). */
FNIEMOP_DEF(iemOp_fabs)
{
    IEMOP_MNEMONIC("fabs st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fabs_r80);
}
13340
13341
13342/**
13343 * Common worker for FPU instructions working on ST0 and only returns FSW.
13344 *
13345 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13346 */
13347FNIEMOP_DEF_1(iemOpHlpFpuNoStore_st0, PFNIEMAIMPLFPUR80UNARYFSW, pfnAImpl)
13348{
13349 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13350
13351 IEM_MC_BEGIN(2, 1);
13352 IEM_MC_LOCAL(uint16_t, u16Fsw);
13353 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
13354 IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 1);
13355
13356 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13357 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13358 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
13359 IEM_MC_CALL_FPU_AIMPL_2(pfnAImpl, pu16Fsw, pr80Value);
13360 IEM_MC_UPDATE_FSW(u16Fsw);
13361 IEM_MC_ELSE()
13362 IEM_MC_FPU_STACK_UNDERFLOW(UINT8_MAX);
13363 IEM_MC_ENDIF();
13364 IEM_MC_USED_FPU();
13365 IEM_MC_ADVANCE_RIP();
13366
13367 IEM_MC_END();
13368 return VINF_SUCCESS;
13369}
13370
13371
/** Opcode 0xd9 0xe4.
 * FTST - compare ST(0) against 0.0, setting condition codes in FSW. */
FNIEMOP_DEF(iemOp_ftst)
{
    IEMOP_MNEMONIC("ftst st0");
    return FNIEMOP_CALL_1(iemOpHlpFpuNoStore_st0, iemAImpl_ftst_r80);
}
13378
13379
/** Opcode 0xd9 0xe5.
 * FXAM - examine/classify ST(0), reporting via FSW condition codes. */
FNIEMOP_DEF(iemOp_fxam)
{
    IEMOP_MNEMONIC("fxam st0");
    return FNIEMOP_CALL_1(iemOpHlpFpuNoStore_st0, iemAImpl_fxam_r80);
}
13386
13387
13388/**
13389 * Common worker for FPU instructions pushing a constant onto the FPU stack.
13390 *
13391 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13392 */
13393FNIEMOP_DEF_1(iemOpHlpFpuPushConstant, PFNIEMAIMPLFPUR80LDCONST, pfnAImpl)
13394{
13395 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13396
13397 IEM_MC_BEGIN(1, 1);
13398 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
13399 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
13400
13401 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13402 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13403 IEM_MC_IF_FPUREG_IS_EMPTY(7)
13404 IEM_MC_CALL_FPU_AIMPL_1(pfnAImpl, pFpuRes);
13405 IEM_MC_PUSH_FPU_RESULT(FpuRes);
13406 IEM_MC_ELSE()
13407 IEM_MC_FPU_STACK_PUSH_OVERFLOW();
13408 IEM_MC_ENDIF();
13409 IEM_MC_USED_FPU();
13410 IEM_MC_ADVANCE_RIP();
13411
13412 IEM_MC_END();
13413 return VINF_SUCCESS;
13414}
13415
13416
/** Opcode 0xd9 0xe8.
 * FLD1 - push +1.0. */
FNIEMOP_DEF(iemOp_fld1)
{
    IEMOP_MNEMONIC("fld1");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fld1);
}
13423
13424
/** Opcode 0xd9 0xe9.
 * FLDL2T - push log2(10). */
FNIEMOP_DEF(iemOp_fldl2t)
{
    IEMOP_MNEMONIC("fldl2t");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldl2t);
}
13431
13432
/** Opcode 0xd9 0xea.
 * FLDL2E - push log2(e). */
FNIEMOP_DEF(iemOp_fldl2e)
{
    IEMOP_MNEMONIC("fldl2e");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldl2e);
}
13439
/** Opcode 0xd9 0xeb.
 * FLDPI - push pi. */
FNIEMOP_DEF(iemOp_fldpi)
{
    IEMOP_MNEMONIC("fldpi");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldpi);
}
13446
13447
/** Opcode 0xd9 0xec.
 * FLDLG2 - push log10(2). */
FNIEMOP_DEF(iemOp_fldlg2)
{
    IEMOP_MNEMONIC("fldlg2");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldlg2);
}
13454
/** Opcode 0xd9 0xed.
 * FLDLN2 - push loge(2). */
FNIEMOP_DEF(iemOp_fldln2)
{
    IEMOP_MNEMONIC("fldln2");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldln2);
}
13461
13462
/** Opcode 0xd9 0xee.
 * FLDZ - push +0.0. */
FNIEMOP_DEF(iemOp_fldz)
{
    IEMOP_MNEMONIC("fldz");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldz);
}
13469
13470
/** Opcode 0xd9 0xf0.
 * F2XM1 - replace ST(0) with 2^ST(0) - 1. */
FNIEMOP_DEF(iemOp_f2xm1)
{
    IEMOP_MNEMONIC("f2xm1 st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_f2xm1_r80);
}
13477
13478
13479/** Opcode 0xd9 0xf1. */
13480FNIEMOP_DEF(iemOp_fylx2)
13481{
13482 IEMOP_MNEMONIC("fylx2 st0");
13483 return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fyl2x_r80);
13484}
13485
13486
13487/**
13488 * Common worker for FPU instructions working on ST0 and having two outputs, one
13489 * replacing ST0 and one pushed onto the stack.
13490 *
13491 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13492 */
13493FNIEMOP_DEF_1(iemOpHlpFpuReplace_st0_push, PFNIEMAIMPLFPUR80UNARYTWO, pfnAImpl)
13494{
13495 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13496
13497 IEM_MC_BEGIN(2, 1);
13498 IEM_MC_LOCAL(IEMFPURESULTTWO, FpuResTwo);
13499 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULTTWO, pFpuResTwo, FpuResTwo, 0);
13500 IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 1);
13501
13502 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13503 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13504 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
13505 IEM_MC_CALL_FPU_AIMPL_2(pfnAImpl, pFpuResTwo, pr80Value);
13506 IEM_MC_PUSH_FPU_RESULT_TWO(FpuResTwo);
13507 IEM_MC_ELSE()
13508 IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO();
13509 IEM_MC_ENDIF();
13510 IEM_MC_USED_FPU();
13511 IEM_MC_ADVANCE_RIP();
13512
13513 IEM_MC_END();
13514 return VINF_SUCCESS;
13515}
13516
13517
/** Opcode 0xd9 0xf2.
 * FPTAN - replaces ST(0) and pushes a second result (two-output worker). */
FNIEMOP_DEF(iemOp_fptan)
{
    IEMOP_MNEMONIC("fptan st0");
    return FNIEMOP_CALL_1(iemOpHlpFpuReplace_st0_push, iemAImpl_fptan_r80_r80);
}
13524
13525
13526/**
13527 * Common worker for FPU instructions working on STn and ST0, storing the result
13528 * in STn, and popping the stack unless IE, DE or ZE was raised.
13529 *
13530 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13531 */
13532FNIEMOP_DEF_2(iemOpHlpFpu_stN_st0_pop, uint8_t, bRm, PFNIEMAIMPLFPUR80, pfnAImpl)
13533{
13534 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13535
13536 IEM_MC_BEGIN(3, 1);
13537 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
13538 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
13539 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
13540 IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
13541
13542 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13543 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13544
13545 IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, bRm & X86_MODRM_RM_MASK, pr80Value2, 0)
13546 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
13547 IEM_MC_STORE_FPU_RESULT_THEN_POP(FpuRes, bRm & X86_MODRM_RM_MASK);
13548 IEM_MC_ELSE()
13549 IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(bRm & X86_MODRM_RM_MASK);
13550 IEM_MC_ENDIF();
13551 IEM_MC_USED_FPU();
13552 IEM_MC_ADVANCE_RIP();
13553
13554 IEM_MC_END();
13555 return VINF_SUCCESS;
13556}
13557
13558
/** Opcode 0xd9 0xf3.
 * FPATAN - result stored in ST(1), then the stack is popped (bRm=1 selects
 * ST(1) in the helper). */
FNIEMOP_DEF(iemOp_fpatan)
{
    IEMOP_MNEMONIC("fpatan st1,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, 1, iemAImpl_fpatan_r80_by_r80);
}
13565
13566
/** Opcode 0xd9 0xf4.
 * FXTRACT - replaces ST(0) and pushes a second result (two-output worker). */
FNIEMOP_DEF(iemOp_fxtract)
{
    IEMOP_MNEMONIC("fxtract st0");
    return FNIEMOP_CALL_1(iemOpHlpFpuReplace_st0_push, iemAImpl_fxtract_r80_r80);
}
13573
13574
/** Opcode 0xd9 0xf5.
 * FPREM1 - IEEE partial remainder of ST(0) by ST(1), stored in ST(0). */
FNIEMOP_DEF(iemOp_fprem1)
{
    IEMOP_MNEMONIC("fprem1 st0, st1");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, 1, iemAImpl_fprem1_r80_by_r80);
}
13581
13582
/** Opcode 0xd9 0xf6.
 * FDECSTP - decrements the FPU stack top pointer; no register content or
 * tag changes. */
FNIEMOP_DEF(iemOp_fdecstp)
{
    IEMOP_MNEMONIC("fdecstp");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    /* Note! C0, C2 and C3 are documented as undefined, we clear them. */
    /** @todo Testcase: Check whether FOP, FPUIP and FPUCS are affected by
     *        FINCSTP and FDECSTP. */

    IEM_MC_BEGIN(0,0);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_FPU_STACK_DEC_TOP();
    IEM_MC_UPDATE_FSW_CONST(0);

    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
13605
13606
/** Opcode 0xd9 0xf7.
 * FINCSTP - increments the FPU stack top pointer; no register content or
 * tag changes. */
FNIEMOP_DEF(iemOp_fincstp)
{
    IEMOP_MNEMONIC("fincstp");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    /* Note! C0, C2 and C3 are documented as undefined, we clear them. */
    /** @todo Testcase: Check whether FOP, FPUIP and FPUCS are affected by
     *        FINCSTP and FDECSTP. */

    IEM_MC_BEGIN(0,0);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_FPU_STACK_INC_TOP();
    IEM_MC_UPDATE_FSW_CONST(0);

    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
13629
13630
/** Opcode 0xd9 0xf8.
 * FPREM - partial remainder of ST(0) by ST(1), stored in ST(0). */
FNIEMOP_DEF(iemOp_fprem)
{
    IEMOP_MNEMONIC("fprem st0, st1");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, 1, iemAImpl_fprem_r80_by_r80);
}
13637
13638
/** Opcode 0xd9 0xf9.
 * FYL2XP1 - result stored in ST(1), then the stack is popped. */
FNIEMOP_DEF(iemOp_fyl2xp1)
{
    IEMOP_MNEMONIC("fyl2xp1 st1,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, 1, iemAImpl_fyl2xp1_r80_by_r80);
}
13645
13646
/** Opcode 0xd9 0xfa.
 * FSQRT - square root of ST(0), stored in ST(0). */
FNIEMOP_DEF(iemOp_fsqrt)
{
    IEMOP_MNEMONIC("fsqrt st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fsqrt_r80);
}
13653
13654
/** Opcode 0xd9 0xfb.
 * FSINCOS - replaces ST(0) and pushes a second result (two-output worker). */
FNIEMOP_DEF(iemOp_fsincos)
{
    IEMOP_MNEMONIC("fsincos st0");
    return FNIEMOP_CALL_1(iemOpHlpFpuReplace_st0_push, iemAImpl_fsincos_r80_r80);
}
13661
13662
/** Opcode 0xd9 0xfc.
 * FRNDINT - round ST(0) to integer, stored in ST(0). */
FNIEMOP_DEF(iemOp_frndint)
{
    IEMOP_MNEMONIC("frndint st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_frndint_r80);
}
13669
13670
/** Opcode 0xd9 0xfd.
 * FSCALE - scales ST(0) by ST(1), result in ST(0). */
FNIEMOP_DEF(iemOp_fscale)
{
    IEMOP_MNEMONIC("fscale st0, st1");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, 1, iemAImpl_fscale_r80_by_r80);
}
13677
13678
/** Opcode 0xd9 0xfe.
 * FSIN - sine of ST(0), stored in ST(0). */
FNIEMOP_DEF(iemOp_fsin)
{
    IEMOP_MNEMONIC("fsin st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fsin_r80);
}
13685
13686
/** Opcode 0xd9 0xff.
 * FCOS - cosine of ST(0), stored in ST(0). */
FNIEMOP_DEF(iemOp_fcos)
{
    IEMOP_MNEMONIC("fcos st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fcos_r80);
}
13693
13694
/** Used by iemOp_EscF1 for dispatching register-form 0xd9 opcodes 0xe0-0xff;
 * the index is the second opcode byte minus 0xe0.
 * @note The 0xf1 entry's function identifier is misspelled (iemOp_fylx2,
 *       should be fyl2x); it does decode FYL2X. */
static const PFNIEMOP g_apfnEscF1_E0toFF[32] =
{
    /* 0xe0 */  iemOp_fchs,
    /* 0xe1 */  iemOp_fabs,
    /* 0xe2 */  iemOp_Invalid,
    /* 0xe3 */  iemOp_Invalid,
    /* 0xe4 */  iemOp_ftst,
    /* 0xe5 */  iemOp_fxam,
    /* 0xe6 */  iemOp_Invalid,
    /* 0xe7 */  iemOp_Invalid,
    /* 0xe8 */  iemOp_fld1,
    /* 0xe9 */  iemOp_fldl2t,
    /* 0xea */  iemOp_fldl2e,
    /* 0xeb */  iemOp_fldpi,
    /* 0xec */  iemOp_fldlg2,
    /* 0xed */  iemOp_fldln2,
    /* 0xee */  iemOp_fldz,
    /* 0xef */  iemOp_Invalid,
    /* 0xf0 */  iemOp_f2xm1,
    /* 0xf1 */  iemOp_fylx2,
    /* 0xf2 */  iemOp_fptan,
    /* 0xf3 */  iemOp_fpatan,
    /* 0xf4 */  iemOp_fxtract,
    /* 0xf5 */  iemOp_fprem1,
    /* 0xf6 */  iemOp_fdecstp,
    /* 0xf7 */  iemOp_fincstp,
    /* 0xf8 */  iemOp_fprem,
    /* 0xf9 */  iemOp_fyl2xp1,
    /* 0xfa */  iemOp_fsqrt,
    /* 0xfb */  iemOp_fsincos,
    /* 0xfc */  iemOp_frndint,
    /* 0xfd */  iemOp_fscale,
    /* 0xfe */  iemOp_fsin,
    /* 0xff */  iemOp_fcos
};
13731
13732
/** Opcode 0xd9.
 * Second FPU escape opcode: register forms dispatch partly via the
 * g_apfnEscF1_E0toFF table, memory forms by ModRM.reg. */
FNIEMOP_DEF(iemOp_EscF1)
{
    /* Record the offset of the escape opcode byte (offOpcode has already
       been advanced past it, hence the -1). */
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fld_stN, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fxch_stN, bRm);
            case 2:
                if (bRm == 0xc9)
                    return FNIEMOP_CALL(iemOp_fnop);
                return IEMOP_RAISE_INVALID_OPCODE();
            case 3: return FNIEMOP_CALL_1(iemOp_fstp_stN, bRm); /* Reserved. Intel behavior seems to be FSTP ST(i) though. */
            case 4:
            case 5:
            case 6:
            case 7:
                /* reg >= 4 implies bRm >= 0xe0, so the table index is in range. */
                Assert((unsigned)bRm - 0xe0U < RT_ELEMENTS(g_apfnEscF1_E0toFF));
                return FNIEMOP_CALL(g_apfnEscF1_E0toFF[bRm - 0xe0]);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fld_m32r,  bRm);
            case 1: return IEMOP_RAISE_INVALID_OPCODE();
            case 2: return FNIEMOP_CALL_1(iemOp_fst_m32r,  bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fstp_m32r, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fldenv,    bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fldcw,     bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fnstenv,   bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fnstcw,    bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
13774
13775
/** Opcode 0xda 11/0.
 * FCMOVB ST(0),ST(i) - copies ST(i) to ST(0) if CF is set. */
FNIEMOP_DEF_1(iemOp_fcmovb_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovb st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        /* FPU opcode/IP tracking is updated even when the move is skipped. */
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13802
13803
/** Opcode 0xda 11/1.
 * FCMOVE ST(0),ST(i) - copies ST(i) to ST(0) if ZF is set. */
FNIEMOP_DEF_1(iemOp_fcmove_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmove st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13830
13831
/** Opcode 0xda 11/2.
 * FCMOVBE ST(0),ST(i) - copies ST(i) to ST(0) if CF or ZF is set. */
FNIEMOP_DEF_1(iemOp_fcmovbe_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovbe st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13858
13859
/** Opcode 0xda 11/3.
 * FCMOVU ST(0),ST(i) - copies ST(i) to ST(0) if PF is set (unordered). */
FNIEMOP_DEF_1(iemOp_fcmovu_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovu st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13886
13887
13888/**
13889 * Common worker for FPU instructions working on ST0 and STn, only affecting
13890 * flags, and popping twice when done.
13891 *
13892 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13893 */
13894FNIEMOP_DEF_1(iemOpHlpFpuNoStore_st0_stN_pop_pop, PFNIEMAIMPLFPUR80FSW, pfnAImpl)
13895{
13896 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13897
13898 IEM_MC_BEGIN(3, 1);
13899 IEM_MC_LOCAL(uint16_t, u16Fsw);
13900 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
13901 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
13902 IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
13903
13904 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13905 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13906 IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, 1)
13907 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pu16Fsw, pr80Value1, pr80Value2);
13908 IEM_MC_UPDATE_FSW_THEN_POP_POP(u16Fsw);
13909 IEM_MC_ELSE()
13910 IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP();
13911 IEM_MC_ENDIF();
13912 IEM_MC_USED_FPU();
13913 IEM_MC_ADVANCE_RIP();
13914
13915 IEM_MC_END();
13916 return VINF_SUCCESS;
13917}
13918
13919
13920 /** Opcode 0xda 0xe9 - FUCOMPP: unordered compare ST0 with ST1, set FSW condition codes, pop twice. */
13921 FNIEMOP_DEF(iemOp_fucompp)
13922 {
13923 IEMOP_MNEMONIC("fucompp st0,stN");
13924 return FNIEMOP_CALL_1(iemOpHlpFpuNoStore_st0_stN_pop_pop, iemAImpl_fucom_r80_by_r80);
13925 }
13926
13927
13928/**
13929 * Common worker for FPU instructions working on ST0 and an m32i, and storing
13930 * the result in ST0.
13931 *
13932 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13933 */
13934FNIEMOP_DEF_2(iemOpHlpFpu_st0_m32i, uint8_t, bRm, PFNIEMAIMPLFPUI32, pfnAImpl)
13935{
13936 IEM_MC_BEGIN(3, 3);
13937 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
13938 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
13939 IEM_MC_LOCAL(int32_t, i32Val2);
13940 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
13941 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
13942 IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val2, i32Val2, 2);
13943
13944 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
13945 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13946
13947 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13948 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13949 IEM_MC_FETCH_MEM_I32(i32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);
13950
13951 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
13952 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pi32Val2);
13953 IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
13954 IEM_MC_ELSE()
13955 IEM_MC_FPU_STACK_UNDERFLOW(0);
13956 IEM_MC_ENDIF();
13957 IEM_MC_USED_FPU();
13958 IEM_MC_ADVANCE_RIP();
13959
13960 IEM_MC_END();
13961 return VINF_SUCCESS;
13962}
13963
13964
13965 /** Opcode 0xda !11/0 - FIADD m32i: ST0 = ST0 + m32i. */
13966 FNIEMOP_DEF_1(iemOp_fiadd_m32i, uint8_t, bRm)
13967 {
13968 IEMOP_MNEMONIC("fiadd m32i");
13969 return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fiadd_r80_by_i32);
13970 }
13971
13972
13973 /** Opcode 0xda !11/1 - FIMUL m32i: ST0 = ST0 * m32i. */
13974 FNIEMOP_DEF_1(iemOp_fimul_m32i, uint8_t, bRm)
13975 {
13976 IEMOP_MNEMONIC("fimul m32i");
13977 return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fimul_r80_by_i32);
13978 }
13979
13980
13981 /** Opcode 0xda !11/2 - FICOM m32i: compare ST0 with a 32-bit integer memory operand, set FSW condition codes only. */
13982 FNIEMOP_DEF_1(iemOp_ficom_m32i, uint8_t, bRm)
13983 {
13984 IEMOP_MNEMONIC("ficom st0,m32i");
13985
13986 IEM_MC_BEGIN(3, 3);
13987 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
13988 IEM_MC_LOCAL(uint16_t, u16Fsw);
13989 IEM_MC_LOCAL(int32_t, i32Val2);
13990 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
13991 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
13992 IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val2, i32Val2, 2);
13993
13994 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
13995 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13996
13997 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13998 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13999 IEM_MC_FETCH_MEM_I32(i32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);
14000
14001 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
14002 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_ficom_r80_by_i32, pu16Fsw, pr80Value1, pi32Val2);
14003 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc); /* flags only, no pop (cf. FICOMP below) */
14004 IEM_MC_ELSE()
14005 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc); /* UINT8_MAX = no destination register */
14006 IEM_MC_ENDIF();
14007 IEM_MC_USED_FPU();
14008 IEM_MC_ADVANCE_RIP();
14009
14010 IEM_MC_END();
14011 return VINF_SUCCESS;
14012 }
14013
14014
14015 /** Opcode 0xda !11/3 - FICOMP m32i: like FICOM m32i but pops ST0 afterwards. */
14016 FNIEMOP_DEF_1(iemOp_ficomp_m32i, uint8_t, bRm)
14017 {
14018 IEMOP_MNEMONIC("ficomp st0,m32i");
14019
14020 IEM_MC_BEGIN(3, 3);
14021 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
14022 IEM_MC_LOCAL(uint16_t, u16Fsw);
14023 IEM_MC_LOCAL(int32_t, i32Val2);
14024 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
14025 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
14026 IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val2, i32Val2, 2);
14027
14028 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
14029 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14030
14031 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14032 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14033 IEM_MC_FETCH_MEM_I32(i32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);
14034
14035 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
14036 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_ficom_r80_by_i32, pu16Fsw, pr80Value1, pi32Val2);
14037 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc); /* _THEN_POP: the only difference from FICOM */
14038 IEM_MC_ELSE()
14039 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
14040 IEM_MC_ENDIF();
14041 IEM_MC_USED_FPU();
14042 IEM_MC_ADVANCE_RIP();
14043
14044 IEM_MC_END();
14045 return VINF_SUCCESS;
14046 }
14047
14048
14049 /** Opcode 0xda !11/4 - FISUB m32i: ST0 = ST0 - m32i. */
14050 FNIEMOP_DEF_1(iemOp_fisub_m32i, uint8_t, bRm)
14051 {
14052 IEMOP_MNEMONIC("fisub m32i");
14053 return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fisub_r80_by_i32);
14054 }
14055
14056
14057 /** Opcode 0xda !11/5 - FISUBR m32i: reversed subtract, ST0 = m32i - ST0. */
14058 FNIEMOP_DEF_1(iemOp_fisubr_m32i, uint8_t, bRm)
14059 {
14060 IEMOP_MNEMONIC("fisubr m32i");
14061 return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fisubr_r80_by_i32);
14062 }
14063
14064
14065 /** Opcode 0xda !11/6 - FIDIV m32i: ST0 = ST0 / m32i. */
14066 FNIEMOP_DEF_1(iemOp_fidiv_m32i, uint8_t, bRm)
14067 {
14068 IEMOP_MNEMONIC("fidiv m32i");
14069 return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fidiv_r80_by_i32);
14070 }
14071
14072
14073 /** Opcode 0xda !11/7 - FIDIVR m32i: reversed divide, ST0 = m32i / ST0. */
14074 FNIEMOP_DEF_1(iemOp_fidivr_m32i, uint8_t, bRm)
14075 {
14076 IEMOP_MNEMONIC("fidivr m32i");
14077 return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fidivr_r80_by_i32);
14078 }
14079
14080
14081 /** Opcode 0xda - FPU escape, dispatches on the ModR/M reg field for both the
14082 * register (mod=3) and memory forms. */
14082 FNIEMOP_DEF(iemOp_EscF2)
14083 {
14084 pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1; /* record FPU opcode offset (for FOP/FIP reporting) before fetching ModR/M */
14085 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
14086 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
14087 {
14088 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
14089 {
14090 case 0: return FNIEMOP_CALL_1(iemOp_fcmovb_stN, bRm);
14091 case 1: return FNIEMOP_CALL_1(iemOp_fcmove_stN, bRm);
14092 case 2: return FNIEMOP_CALL_1(iemOp_fcmovbe_stN, bRm);
14093 case 3: return FNIEMOP_CALL_1(iemOp_fcmovu_stN, bRm);
14094 case 4: return IEMOP_RAISE_INVALID_OPCODE();
14095 case 5:
14096 if (bRm == 0xe9) /* only DA E9 (FUCOMPP) is valid in this group */
14097 return FNIEMOP_CALL(iemOp_fucompp);
14098 return IEMOP_RAISE_INVALID_OPCODE();
14099 case 6: return IEMOP_RAISE_INVALID_OPCODE();
14100 case 7: return IEMOP_RAISE_INVALID_OPCODE();
14101 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14102 }
14103 }
14104 else
14105 {
14106 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
14107 {
14108 case 0: return FNIEMOP_CALL_1(iemOp_fiadd_m32i, bRm);
14109 case 1: return FNIEMOP_CALL_1(iemOp_fimul_m32i, bRm);
14110 case 2: return FNIEMOP_CALL_1(iemOp_ficom_m32i, bRm);
14111 case 3: return FNIEMOP_CALL_1(iemOp_ficomp_m32i, bRm);
14112 case 4: return FNIEMOP_CALL_1(iemOp_fisub_m32i, bRm);
14113 case 5: return FNIEMOP_CALL_1(iemOp_fisubr_m32i, bRm);
14114 case 6: return FNIEMOP_CALL_1(iemOp_fidiv_m32i, bRm);
14115 case 7: return FNIEMOP_CALL_1(iemOp_fidivr_m32i, bRm);
14116 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14117 }
14118 }
14119 }
14120
14121
14122 /** Opcode 0xdb !11/0 - FILD m32i: convert a 32-bit integer memory operand to
14123 * R80 and push it onto the FPU stack. */
14123 FNIEMOP_DEF_1(iemOp_fild_m32i, uint8_t, bRm)
14124 {
14125 IEMOP_MNEMONIC("fild m32i");
14126
14127 IEM_MC_BEGIN(2, 3);
14128 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
14129 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
14130 IEM_MC_LOCAL(int32_t, i32Val);
14131 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
14132 IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val, i32Val, 1);
14133
14134 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
14135 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14136
14137 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14138 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14139 IEM_MC_FETCH_MEM_I32(i32Val, pIemCpu->iEffSeg, GCPtrEffSrc);
14140
14141 IEM_MC_IF_FPUREG_IS_EMPTY(7) /* pushing: ST7 (the slot below TOP) must be free */
14142 IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fild_i32_to_r80, pFpuRes, pi32Val);
14143 IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pIemCpu->iEffSeg, GCPtrEffSrc);
14144 IEM_MC_ELSE()
14145 IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pIemCpu->iEffSeg, GCPtrEffSrc);
14146 IEM_MC_ENDIF();
14147 IEM_MC_USED_FPU();
14148 IEM_MC_ADVANCE_RIP();
14149
14150 IEM_MC_END();
14151 return VINF_SUCCESS;
14152 }
14153
14154
14155 /** Opcode 0xdb !11/1 - FISTTP m32i (SSE3): store ST0 to memory as a 32-bit
14156 * integer using truncation, then pop. */
14156 FNIEMOP_DEF_1(iemOp_fisttp_m32i, uint8_t, bRm)
14157 {
14158 IEMOP_MNEMONIC("fisttp m32i");
14159 IEM_MC_BEGIN(3, 2);
14160 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
14161 IEM_MC_LOCAL(uint16_t, u16Fsw);
14162 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
14163 IEM_MC_ARG(int32_t *, pi32Dst, 1);
14164 IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
14165
14166 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
14167 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14168 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14169 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14170
14171 IEM_MC_MEM_MAP(pi32Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
14172 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
14173 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fistt_r80_to_i32, pu16Fsw, pi32Dst, pr80Value); /* fistt = truncating variant */
14174 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi32Dst, IEM_ACCESS_DATA_W, u16Fsw);
14175 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
14176 IEM_MC_ELSE()
14177 IEM_MC_IF_FCW_IM() /* invalid-op exception masked: write the integer indefinite value */
14178 IEM_MC_STORE_MEM_I32_CONST_BY_REF(pi32Dst, INT32_MIN /* (integer indefinite) */);
14179 IEM_MC_MEM_COMMIT_AND_UNMAP(pi32Dst, IEM_ACCESS_DATA_W);
14180 IEM_MC_ENDIF();
14181 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
14182 IEM_MC_ENDIF();
14183 IEM_MC_USED_FPU();
14184 IEM_MC_ADVANCE_RIP();
14185
14186 IEM_MC_END();
14187 return VINF_SUCCESS;
14188 }
14189
14190
14191 /** Opcode 0xdb !11/2 - FIST m32i: store ST0 to memory as a 32-bit integer
14192 * (rounded per FCW.RC), no pop. */
14192 FNIEMOP_DEF_1(iemOp_fist_m32i, uint8_t, bRm)
14193 {
14194 IEMOP_MNEMONIC("fist m32i");
14195 IEM_MC_BEGIN(3, 2);
14196 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
14197 IEM_MC_LOCAL(uint16_t, u16Fsw);
14198 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
14199 IEM_MC_ARG(int32_t *, pi32Dst, 1);
14200 IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
14201
14202 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
14203 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14204 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14205 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14206
14207 IEM_MC_MEM_MAP(pi32Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
14208 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
14209 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i32, pu16Fsw, pi32Dst, pr80Value);
14210 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi32Dst, IEM_ACCESS_DATA_W, u16Fsw);
14211 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst); /* no pop (cf. FISTP) */
14212 IEM_MC_ELSE()
14213 IEM_MC_IF_FCW_IM() /* invalid-op exception masked: write the integer indefinite value */
14214 IEM_MC_STORE_MEM_I32_CONST_BY_REF(pi32Dst, INT32_MIN /* (integer indefinite) */);
14215 IEM_MC_MEM_COMMIT_AND_UNMAP(pi32Dst, IEM_ACCESS_DATA_W);
14216 IEM_MC_ENDIF();
14217 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
14218 IEM_MC_ENDIF();
14219 IEM_MC_USED_FPU();
14220 IEM_MC_ADVANCE_RIP();
14221
14222 IEM_MC_END();
14223 return VINF_SUCCESS;
14224 }
14225
14226
14227 /** Opcode 0xdb !11/3 - FISTP m32i: store ST0 to memory as a 32-bit integer
14228 * (rounded per FCW.RC), then pop. Unlike FISTTP (!11/1) it uses the
14229 * current rounding mode (iemAImpl_fist_r80_to_i32, not fistt). */
14228 FNIEMOP_DEF_1(iemOp_fistp_m32i, uint8_t, bRm)
14229 {
14230 IEMOP_MNEMONIC("fistp m32i"); /* was wrongly "fisttp m32i", duplicating the !11/1 mnemonic */
14231 IEM_MC_BEGIN(3, 2);
14232 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
14233 IEM_MC_LOCAL(uint16_t, u16Fsw);
14234 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
14235 IEM_MC_ARG(int32_t *, pi32Dst, 1);
14236 IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
14237
14238 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
14239 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14240 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14241 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14242
14243 IEM_MC_MEM_MAP(pi32Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
14244 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
14245 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i32, pu16Fsw, pi32Dst, pr80Value);
14246 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi32Dst, IEM_ACCESS_DATA_W, u16Fsw);
14247 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
14248 IEM_MC_ELSE()
14249 IEM_MC_IF_FCW_IM() /* invalid-op exception masked: write the integer indefinite value */
14250 IEM_MC_STORE_MEM_I32_CONST_BY_REF(pi32Dst, INT32_MIN /* (integer indefinite) */);
14251 IEM_MC_MEM_COMMIT_AND_UNMAP(pi32Dst, IEM_ACCESS_DATA_W);
14252 IEM_MC_ENDIF();
14253 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
14254 IEM_MC_ENDIF();
14255 IEM_MC_USED_FPU();
14256 IEM_MC_ADVANCE_RIP();
14257
14258 IEM_MC_END();
14259 return VINF_SUCCESS;
14260 }
14261
14262
14263 /** Opcode 0xdb !11/5 - FLD m80r: push an 80-bit real memory operand onto the
14264 * FPU stack. */
14264 FNIEMOP_DEF_1(iemOp_fld_m80r, uint8_t, bRm)
14265 {
14266 IEMOP_MNEMONIC("fld m80r");
14267
14268 IEM_MC_BEGIN(2, 3);
14269 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
14270 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
14271 IEM_MC_LOCAL(RTFLOAT80U, r80Val);
14272 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
14273 IEM_MC_ARG_LOCAL_REF(PCRTFLOAT80U, pr80Val, r80Val, 1);
14274
14275 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
14276 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14277
14278 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14279 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14280 IEM_MC_FETCH_MEM_R80(r80Val, pIemCpu->iEffSeg, GCPtrEffSrc);
14281
14282 IEM_MC_IF_FPUREG_IS_EMPTY(7) /* pushing: ST7 must be free */
14283 IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fld_r80_from_r80, pFpuRes, pr80Val);
14284 IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pIemCpu->iEffSeg, GCPtrEffSrc);
14285 IEM_MC_ELSE()
14286 IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pIemCpu->iEffSeg, GCPtrEffSrc);
14287 IEM_MC_ENDIF();
14288 IEM_MC_USED_FPU();
14289 IEM_MC_ADVANCE_RIP();
14290
14291 IEM_MC_END();
14292 return VINF_SUCCESS;
14293 }
14294
14295
14296 /** Opcode 0xdb !11/7 - FSTP m80r: store ST0 to memory as an 80-bit real,
14297 * then pop. */
14297 FNIEMOP_DEF_1(iemOp_fstp_m80r, uint8_t, bRm)
14298 {
14299 IEMOP_MNEMONIC("fstp m80r");
14300 IEM_MC_BEGIN(3, 2);
14301 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
14302 IEM_MC_LOCAL(uint16_t, u16Fsw);
14303 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
14304 IEM_MC_ARG(PRTFLOAT80U, pr80Dst, 1);
14305 IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
14306
14307 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
14308 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14309 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14310 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14311
14312 IEM_MC_MEM_MAP(pr80Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
14313 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
14314 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r80, pu16Fsw, pr80Dst, pr80Value);
14315 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr80Dst, IEM_ACCESS_DATA_W, u16Fsw);
14316 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
14317 IEM_MC_ELSE()
14318 IEM_MC_IF_FCW_IM() /* invalid-op exception masked: write QNaN indefinite */
14319 IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(pr80Dst);
14320 IEM_MC_MEM_COMMIT_AND_UNMAP(pr80Dst, IEM_ACCESS_DATA_W);
14321 IEM_MC_ENDIF();
14322 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
14323 IEM_MC_ENDIF();
14324 IEM_MC_USED_FPU();
14325 IEM_MC_ADVANCE_RIP();
14326
14327 IEM_MC_END();
14328 return VINF_SUCCESS;
14329 }
14330
14331
14332 /** Opcode 0xdb 11/0 - FCMOVNB ST(0),ST(i): copy ST(i) to ST(0) if EFLAGS.CF is clear. */
14333 FNIEMOP_DEF_1(iemOp_fcmovnb_stN, uint8_t, bRm)
14334 {
14335 IEMOP_MNEMONIC("fcmovnb st0,stN");
14336 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14337
14338 IEM_MC_BEGIN(0, 1);
14339 IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);
14340
14341 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14342 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14343
14344 IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
14345 IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_CF) /* move condition: CF clear */
14346 IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
14347 IEM_MC_ENDIF();
14348 IEM_MC_UPDATE_FPU_OPCODE_IP();
14349 IEM_MC_ELSE()
14350 IEM_MC_FPU_STACK_UNDERFLOW(0);
14351 IEM_MC_ENDIF();
14352 IEM_MC_USED_FPU();
14353 IEM_MC_ADVANCE_RIP();
14354
14355 IEM_MC_END();
14356 return VINF_SUCCESS;
14357 }
14358
14359
14360 /** Opcode 0xdb 11/1 - FCMOVNE ST(0),ST(i): copy ST(i) to ST(0) if EFLAGS.ZF is clear. */
14361 FNIEMOP_DEF_1(iemOp_fcmovne_stN, uint8_t, bRm)
14362 {
14363 IEMOP_MNEMONIC("fcmovne st0,stN");
14364 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14365
14366 IEM_MC_BEGIN(0, 1);
14367 IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);
14368
14369 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14370 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14371
14372 IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
14373 IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF) /* move condition: ZF clear */
14374 IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
14375 IEM_MC_ENDIF();
14376 IEM_MC_UPDATE_FPU_OPCODE_IP();
14377 IEM_MC_ELSE()
14378 IEM_MC_FPU_STACK_UNDERFLOW(0);
14379 IEM_MC_ENDIF();
14380 IEM_MC_USED_FPU();
14381 IEM_MC_ADVANCE_RIP();
14382
14383 IEM_MC_END();
14384 return VINF_SUCCESS;
14385 }
14386
14387
14388 /** Opcode 0xdb 11/2 - FCMOVNBE ST(0),ST(i): copy ST(i) to ST(0) if both EFLAGS.CF and EFLAGS.ZF are clear. */
14389 FNIEMOP_DEF_1(iemOp_fcmovnbe_stN, uint8_t, bRm)
14390 {
14391 IEMOP_MNEMONIC("fcmovnbe st0,stN");
14392 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14393
14394 IEM_MC_BEGIN(0, 1);
14395 IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);
14396
14397 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14398 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14399
14400 IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
14401 IEM_MC_IF_EFL_NO_BITS_SET(X86_EFL_CF | X86_EFL_ZF) /* move condition: CF=0 and ZF=0 */
14402 IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
14403 IEM_MC_ENDIF();
14404 IEM_MC_UPDATE_FPU_OPCODE_IP();
14405 IEM_MC_ELSE()
14406 IEM_MC_FPU_STACK_UNDERFLOW(0);
14407 IEM_MC_ENDIF();
14408 IEM_MC_USED_FPU();
14409 IEM_MC_ADVANCE_RIP();
14410
14411 IEM_MC_END();
14412 return VINF_SUCCESS;
14413 }
14414
14415
14416 /** Opcode 0xdb 11/3 - FCMOVNU ST(0),ST(i): copy ST(i) to ST(0) if EFLAGS.PF is clear.
14417 * NOTE(review): the function name and mnemonic string use "fcmovnnu"
14418 * (double 'n'); the conventional spelling of this instruction is FCMOVNU —
14419 * verify before renaming, as the string is used for stats/logging. */
14417 FNIEMOP_DEF_1(iemOp_fcmovnnu_stN, uint8_t, bRm)
14418 {
14419 IEMOP_MNEMONIC("fcmovnnu st0,stN");
14420 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14421
14422 IEM_MC_BEGIN(0, 1);
14423 IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);
14424
14425 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14426 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14427
14428 IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
14429 IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_PF) /* move condition: PF clear ("not unordered") */
14430 IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
14431 IEM_MC_ENDIF();
14432 IEM_MC_UPDATE_FPU_OPCODE_IP();
14433 IEM_MC_ELSE()
14434 IEM_MC_FPU_STACK_UNDERFLOW(0);
14435 IEM_MC_ENDIF();
14436 IEM_MC_USED_FPU();
14437 IEM_MC_ADVANCE_RIP();
14438
14439 IEM_MC_END();
14440 return VINF_SUCCESS;
14441 }
14442
14443
14444 /** Opcode 0xdb 0xe0 - FNENI: 8087-only enable-interrupts; treated as a no-op
14445 * here (only raises \#NM if the FPU is unavailable). */
14445 FNIEMOP_DEF(iemOp_fneni)
14446 {
14447 IEMOP_MNEMONIC("fneni (8087/ign)");
14448 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14449 IEM_MC_BEGIN(0,0);
14450 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14451 IEM_MC_ADVANCE_RIP();
14452 IEM_MC_END();
14453 return VINF_SUCCESS;
14454 }
14455
14456
14457 /** Opcode 0xdb 0xe1 - FNDISI: 8087-only disable-interrupts; treated as a
14458 * no-op here (only raises \#NM if the FPU is unavailable). */
14458 FNIEMOP_DEF(iemOp_fndisi)
14459 {
14460 IEMOP_MNEMONIC("fndisi (8087/ign)");
14461 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14462 IEM_MC_BEGIN(0,0);
14463 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14464 IEM_MC_ADVANCE_RIP();
14465 IEM_MC_END();
14466 return VINF_SUCCESS;
14467 }
14468
14469
14470 /** Opcode 0xdb 0xe2 - FNCLEX: clear the FPU exception flags in FSW without
14471 * checking for pending exceptions first. */
14471 FNIEMOP_DEF(iemOp_fnclex)
14472 {
14473 IEMOP_MNEMONIC("fnclex");
14474 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14475
14476 IEM_MC_BEGIN(0,0);
14477 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14478 IEM_MC_CLEAR_FSW_EX(); /* clears exception bits in FSW */
14479 IEM_MC_ADVANCE_RIP();
14480 IEM_MC_END();
14481 return VINF_SUCCESS;
14482 }
14483
14484
14485 /** Opcode 0xdb 0xe3 - FNINIT: reinitialize the FPU; deferred to the C
14486 * implementation with fCheckXcpts=false (the "no-wait" form). */
14486 FNIEMOP_DEF(iemOp_fninit)
14487 {
14488 IEMOP_MNEMONIC("fninit");
14489 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14490 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_finit, false /*fCheckXcpts*/);
14491 }
14492
14493
14494 /** Opcode 0xdb 0xe4 - FNSETPM: 80287-only "set protected mode"; treated as a
14495 * no-op here (only raises \#NM if the FPU is unavailable). */
14495 FNIEMOP_DEF(iemOp_fnsetpm)
14496 {
14497 IEMOP_MNEMONIC("fnsetpm (80287/ign)"); /* set protected mode on fpu. */
14498 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14499 IEM_MC_BEGIN(0,0);
14500 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14501 IEM_MC_ADVANCE_RIP();
14502 IEM_MC_END();
14503 return VINF_SUCCESS;
14504 }
14505
14506
14507 /** Opcode 0xdb 0xe5 - FRSTPM: 80287XL-only "reset protected mode"; newer
14508 * CPUs raise \#UD, which is the behavior compiled in here (the no-op
14509 * variant is disabled via \#if 0). */
14508 FNIEMOP_DEF(iemOp_frstpm)
14509 {
14510 IEMOP_MNEMONIC("frstpm (80287XL/ign)"); /* reset pm, back to real mode. */
14511 #if 0 /* #UDs on newer CPUs */
14512 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14513 IEM_MC_BEGIN(0,0);
14514 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14515 IEM_MC_ADVANCE_RIP();
14516 IEM_MC_END();
14517 return VINF_SUCCESS;
14518 #else
14519 return IEMOP_RAISE_INVALID_OPCODE();
14520 #endif
14521 }
14522
14523
14524 /** Opcode 0xdb 11/5 - FUCOMI ST0,ST(i): unordered compare setting EFLAGS, no pop. */
14525 FNIEMOP_DEF_1(iemOp_fucomi_stN, uint8_t, bRm)
14526 {
14527 IEMOP_MNEMONIC("fucomi st0,stN");
14528 return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_fcomi_fucomi, bRm & X86_MODRM_RM_MASK, iemAImpl_fucomi_r80_by_r80, false /*fPop*/);
14529 }
14530
14531
14532 /** Opcode 0xdb 11/6 - FCOMI ST0,ST(i): ordered compare setting EFLAGS, no pop. */
14533 FNIEMOP_DEF_1(iemOp_fcomi_stN, uint8_t, bRm)
14534 {
14535 IEMOP_MNEMONIC("fcomi st0,stN");
14536 return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_fcomi_fucomi, bRm & X86_MODRM_RM_MASK, iemAImpl_fcomi_r80_by_r80, false /*fPop*/);
14537 }
14538
14539
14540 /** Opcode 0xdb - FPU escape, dispatches on the ModR/M reg field for both the
14541 * register (mod=3) and memory forms.
14542 *
14543 * Fix: cases 0-3 of the register-form switch were missing the 'return',
14544 * so decoding e.g. FCMOVNB fell through and also executed FCMOVNE,
14545 * FCMOVNBE, FCMOVNNU and then case 4's inner switch (hitting the
14546 * not-reached default for register-form encodings). All other escape
14547 * dispatchers in this file (iemOp_EscF2, iemOp_EscF4) use
14548 * 'return FNIEMOP_CALL_1(...)'. */
14541 FNIEMOP_DEF(iemOp_EscF3)
14542 {
14543 pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1; /* record FPU opcode offset before fetching ModR/M */
14544 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
14545 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
14546 {
14547 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
14548 {
14549 case 0: return FNIEMOP_CALL_1(iemOp_fcmovnb_stN, bRm);
14550 case 1: return FNIEMOP_CALL_1(iemOp_fcmovne_stN, bRm);
14551 case 2: return FNIEMOP_CALL_1(iemOp_fcmovnbe_stN, bRm);
14552 case 3: return FNIEMOP_CALL_1(iemOp_fcmovnnu_stN, bRm);
14553 case 4:
14554 switch (bRm)
14555 {
14556 case 0xe0: return FNIEMOP_CALL(iemOp_fneni);
14557 case 0xe1: return FNIEMOP_CALL(iemOp_fndisi);
14558 case 0xe2: return FNIEMOP_CALL(iemOp_fnclex);
14559 case 0xe3: return FNIEMOP_CALL(iemOp_fninit);
14560 case 0xe4: return FNIEMOP_CALL(iemOp_fnsetpm);
14561 case 0xe5: return FNIEMOP_CALL(iemOp_frstpm);
14562 case 0xe6: return IEMOP_RAISE_INVALID_OPCODE();
14563 case 0xe7: return IEMOP_RAISE_INVALID_OPCODE();
14564 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14565 }
14566 break;
14567 case 5: return FNIEMOP_CALL_1(iemOp_fucomi_stN, bRm);
14568 case 6: return FNIEMOP_CALL_1(iemOp_fcomi_stN, bRm);
14569 case 7: return IEMOP_RAISE_INVALID_OPCODE();
14570 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14571 }
14572 }
14573 else
14574 {
14575 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
14576 {
14577 case 0: return FNIEMOP_CALL_1(iemOp_fild_m32i, bRm);
14578 case 1: return FNIEMOP_CALL_1(iemOp_fisttp_m32i,bRm);
14579 case 2: return FNIEMOP_CALL_1(iemOp_fist_m32i, bRm);
14580 case 3: return FNIEMOP_CALL_1(iemOp_fistp_m32i, bRm);
14581 case 4: return IEMOP_RAISE_INVALID_OPCODE();
14582 case 5: return FNIEMOP_CALL_1(iemOp_fld_m80r, bRm);
14583 case 6: return IEMOP_RAISE_INVALID_OPCODE();
14584 case 7: return FNIEMOP_CALL_1(iemOp_fstp_m80r, bRm);
14585 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14586 }
14587 }
14588 }
14589
14590
14591/**
14592 * Common worker for FPU instructions working on STn and ST0, and storing the
14593 * result in STn unless IE, DE or ZE was raised.
14594 *
14595 * @param pfnAImpl Pointer to the instruction implementation (assembly).
14596 */
14597FNIEMOP_DEF_2(iemOpHlpFpu_stN_st0, uint8_t, bRm, PFNIEMAIMPLFPUR80, pfnAImpl)
14598{
14599 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14600
14601 IEM_MC_BEGIN(3, 1);
14602 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
14603 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
14604 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
14605 IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
14606
14607 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14608 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14609
14610 IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, bRm & X86_MODRM_RM_MASK, pr80Value2, 0)
14611 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
14612 IEM_MC_STORE_FPU_RESULT(FpuRes, bRm & X86_MODRM_RM_MASK);
14613 IEM_MC_ELSE()
14614 IEM_MC_FPU_STACK_UNDERFLOW(bRm & X86_MODRM_RM_MASK);
14615 IEM_MC_ENDIF();
14616 IEM_MC_USED_FPU();
14617 IEM_MC_ADVANCE_RIP();
14618
14619 IEM_MC_END();
14620 return VINF_SUCCESS;
14621}
14622
14623
14624 /** Opcode 0xdc 11/0 - FADD ST(i),ST0: ST(i) = ST(i) + ST0. */
14625 FNIEMOP_DEF_1(iemOp_fadd_stN_st0, uint8_t, bRm)
14626 {
14627 IEMOP_MNEMONIC("fadd stN,st0");
14628 return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fadd_r80_by_r80);
14629 }
14630
14631
14632 /** Opcode 0xdc 11/1 - FMUL ST(i),ST0: ST(i) = ST(i) * ST0. */
14633 FNIEMOP_DEF_1(iemOp_fmul_stN_st0, uint8_t, bRm)
14634 {
14635 IEMOP_MNEMONIC("fmul stN,st0");
14636 return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fmul_r80_by_r80);
14637 }
14638
14639
14640 /** Opcode 0xdc 11/4 - FSUBR ST(i),ST0 (note: reg 4/5 are swapped vs the 0xd8 forms). */
14641 FNIEMOP_DEF_1(iemOp_fsubr_stN_st0, uint8_t, bRm)
14642 {
14643 IEMOP_MNEMONIC("fsubr stN,st0");
14644 return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fsubr_r80_by_r80);
14645 }
14646
14647
14648 /** Opcode 0xdc 11/5 - FSUB ST(i),ST0 (note: reg 4/5 are swapped vs the 0xd8 forms). */
14649 FNIEMOP_DEF_1(iemOp_fsub_stN_st0, uint8_t, bRm)
14650 {
14651 IEMOP_MNEMONIC("fsub stN,st0");
14652 return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fsub_r80_by_r80);
14653 }
14654
14655
14656 /** Opcode 0xdc 11/6 - FDIVR ST(i),ST0 (note: reg 6/7 are swapped vs the 0xd8 forms). */
14657 FNIEMOP_DEF_1(iemOp_fdivr_stN_st0, uint8_t, bRm)
14658 {
14659 IEMOP_MNEMONIC("fdivr stN,st0");
14660 return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fdivr_r80_by_r80);
14661 }
14662
14663
14664 /** Opcode 0xdc 11/7 - FDIV ST(i),ST0 (note: reg 6/7 are swapped vs the 0xd8 forms). */
14665 FNIEMOP_DEF_1(iemOp_fdiv_stN_st0, uint8_t, bRm)
14666 {
14667 IEMOP_MNEMONIC("fdiv stN,st0");
14668 return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fdiv_r80_by_r80);
14669 }
14670
14671
14672/**
14673 * Common worker for FPU instructions working on ST0 and a 64-bit floating point
14674 * memory operand, and storing the result in ST0.
14675 *
14676 * @param pfnAImpl Pointer to the instruction implementation (assembly).
14677 */
14678FNIEMOP_DEF_2(iemOpHlpFpu_ST0_m64r, uint8_t, bRm, PFNIEMAIMPLFPUR64, pfnImpl)
14679{
14680 IEM_MC_BEGIN(3, 3);
14681 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
14682 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
14683 IEM_MC_LOCAL(RTFLOAT64U, r64Factor2);
14684 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
14685 IEM_MC_ARG(PCRTFLOAT80U, pr80Factor1, 1);
14686 IEM_MC_ARG_LOCAL_REF(PRTFLOAT64U, pr64Factor2, r64Factor2, 2);
14687
14688 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
14689 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14690 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14691 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14692
14693 IEM_MC_FETCH_MEM_R64(r64Factor2, pIemCpu->iEffSeg, GCPtrEffSrc);
14694 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Factor1, 0)
14695 IEM_MC_CALL_FPU_AIMPL_3(pfnImpl, pFpuRes, pr80Factor1, pr64Factor2);
14696 IEM_MC_STORE_FPU_RESULT_MEM_OP(FpuRes, 0, pIemCpu->iEffSeg, GCPtrEffSrc);
14697 IEM_MC_ELSE()
14698 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(0, pIemCpu->iEffSeg, GCPtrEffSrc);
14699 IEM_MC_ENDIF();
14700 IEM_MC_USED_FPU();
14701 IEM_MC_ADVANCE_RIP();
14702
14703 IEM_MC_END();
14704 return VINF_SUCCESS;
14705}
14706
14707
14708 /** Opcode 0xdc !11/0 - FADD m64r: ST0 = ST0 + m64r. */
14709 FNIEMOP_DEF_1(iemOp_fadd_m64r, uint8_t, bRm)
14710 {
14711 IEMOP_MNEMONIC("fadd m64r");
14712 return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fadd_r80_by_r64);
14713 }
14714
14715
14716 /** Opcode 0xdc !11/1 - FMUL m64r: ST0 = ST0 * m64r. */
14717 FNIEMOP_DEF_1(iemOp_fmul_m64r, uint8_t, bRm)
14718 {
14719 IEMOP_MNEMONIC("fmul m64r");
14720 return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fmul_r80_by_r64);
14721 }
14722
14723
14724 /** Opcode 0xdc !11/2 - FCOM m64r: compare ST0 with a 64-bit real memory
14725 * operand, set FSW condition codes only, no pop. */
14725 FNIEMOP_DEF_1(iemOp_fcom_m64r, uint8_t, bRm)
14726 {
14727 IEMOP_MNEMONIC("fcom st0,m64r");
14728
14729 IEM_MC_BEGIN(3, 3);
14730 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
14731 IEM_MC_LOCAL(uint16_t, u16Fsw);
14732 IEM_MC_LOCAL(RTFLOAT64U, r64Val2);
14733 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
14734 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
14735 IEM_MC_ARG_LOCAL_REF(PCRTFLOAT64U, pr64Val2, r64Val2, 2);
14736
14737 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
14738 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14739
14740 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14741 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14742 IEM_MC_FETCH_MEM_R64(r64Val2, pIemCpu->iEffSeg, GCPtrEffSrc);
14743
14744 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
14745 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fcom_r80_by_r64, pu16Fsw, pr80Value1, pr64Val2);
14746 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc); /* flags only, no pop (cf. FCOMP below) */
14747 IEM_MC_ELSE()
14748 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
14749 IEM_MC_ENDIF();
14750 IEM_MC_USED_FPU();
14751 IEM_MC_ADVANCE_RIP();
14752
14753 IEM_MC_END();
14754 return VINF_SUCCESS;
14755 }
14756
14757
14758 /** Opcode 0xdc !11/3 - FCOMP m64r: like FCOM m64r but pops ST0 afterwards. */
14759 FNIEMOP_DEF_1(iemOp_fcomp_m64r, uint8_t, bRm)
14760 {
14761 IEMOP_MNEMONIC("fcomp st0,m64r");
14762
14763 IEM_MC_BEGIN(3, 3);
14764 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
14765 IEM_MC_LOCAL(uint16_t, u16Fsw);
14766 IEM_MC_LOCAL(RTFLOAT64U, r64Val2);
14767 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
14768 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
14769 IEM_MC_ARG_LOCAL_REF(PCRTFLOAT64U, pr64Val2, r64Val2, 2);
14770
14771 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
14772 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14773
14774 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14775 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14776 IEM_MC_FETCH_MEM_R64(r64Val2, pIemCpu->iEffSeg, GCPtrEffSrc);
14777
14778 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
14779 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fcom_r80_by_r64, pu16Fsw, pr80Value1, pr64Val2);
14780 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc); /* _THEN_POP: the only difference from FCOM */
14781 IEM_MC_ELSE()
14782 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
14783 IEM_MC_ENDIF();
14784 IEM_MC_USED_FPU();
14785 IEM_MC_ADVANCE_RIP();
14786
14787 IEM_MC_END();
14788 return VINF_SUCCESS;
14789 }
14790
14791
14792 /** Opcode 0xdc !11/4 - FSUB m64r: ST0 = ST0 - m64r. */
14793 FNIEMOP_DEF_1(iemOp_fsub_m64r, uint8_t, bRm)
14794 {
14795 IEMOP_MNEMONIC("fsub m64r");
14796 return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fsub_r80_by_r64);
14797 }
14798
14799
14800 /** Opcode 0xdc !11/5 - FSUBR m64r: reversed subtract, ST0 = m64r - ST0. */
14801 FNIEMOP_DEF_1(iemOp_fsubr_m64r, uint8_t, bRm)
14802 {
14803 IEMOP_MNEMONIC("fsubr m64r");
14804 return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fsubr_r80_by_r64);
14805 }
14806
14807
14808 /** Opcode 0xdc !11/6 - FDIV m64r: ST0 = ST0 / m64r. */
14809 FNIEMOP_DEF_1(iemOp_fdiv_m64r, uint8_t, bRm)
14810 {
14811 IEMOP_MNEMONIC("fdiv m64r");
14812 return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fdiv_r80_by_r64);
14813 }
14814
14815
14816 /** Opcode 0xdc !11/7 - FDIVR m64r: reversed divide, ST0 = m64r / ST0. */
14817 FNIEMOP_DEF_1(iemOp_fdivr_m64r, uint8_t, bRm)
14818 {
14819 IEMOP_MNEMONIC("fdivr m64r");
14820 return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fdivr_r80_by_r64);
14821 }
14822
14823
14824 /** Opcode 0xdc - FPU escape, dispatches on the ModR/M reg field for both the
14825 * register (mod=3) and memory forms. */
14825 FNIEMOP_DEF(iemOp_EscF4)
14826 {
14827 pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1; /* record FPU opcode offset before fetching ModR/M */
14828 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
14829 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
14830 {
14831 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
14832 {
14833 case 0: return FNIEMOP_CALL_1(iemOp_fadd_stN_st0, bRm);
14834 case 1: return FNIEMOP_CALL_1(iemOp_fmul_stN_st0, bRm);
14835 case 2: return FNIEMOP_CALL_1(iemOp_fcom_stN, bRm); /* Marked reserved, intel behavior is that of FCOM ST(i). */
14836 case 3: return FNIEMOP_CALL_1(iemOp_fcomp_stN, bRm); /* Marked reserved, intel behavior is that of FCOMP ST(i). */
14837 case 4: return FNIEMOP_CALL_1(iemOp_fsubr_stN_st0, bRm);
14838 case 5: return FNIEMOP_CALL_1(iemOp_fsub_stN_st0, bRm);
14839 case 6: return FNIEMOP_CALL_1(iemOp_fdivr_stN_st0, bRm);
14840 case 7: return FNIEMOP_CALL_1(iemOp_fdiv_stN_st0, bRm);
14841 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14842 }
14843 }
14844 else
14845 {
14846 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
14847 {
14848 case 0: return FNIEMOP_CALL_1(iemOp_fadd_m64r, bRm);
14849 case 1: return FNIEMOP_CALL_1(iemOp_fmul_m64r, bRm);
14850 case 2: return FNIEMOP_CALL_1(iemOp_fcom_m64r, bRm);
14851 case 3: return FNIEMOP_CALL_1(iemOp_fcomp_m64r, bRm);
14852 case 4: return FNIEMOP_CALL_1(iemOp_fsub_m64r, bRm);
14853 case 5: return FNIEMOP_CALL_1(iemOp_fsubr_m64r, bRm);
14854 case 6: return FNIEMOP_CALL_1(iemOp_fdiv_m64r, bRm);
14855 case 7: return FNIEMOP_CALL_1(iemOp_fdivr_m64r, bRm);
14856 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14857 }
14858 }
14859 }
14860
14861
/** Opcode 0xdd !11/0.
 * FLD m64fp: convert the 64-bit real memory operand to 80-bit and push it
 * onto the FPU stack; signals stack overflow if ST(7) is occupied.
 * @sa iemOp_fld_m32r */
FNIEMOP_DEF_1(iemOp_fld_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fld m64r");

    IEM_MC_BEGIN(2, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_LOCAL(RTFLOAT64U, r64Val);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT64U, pr64Val, r64Val, 1);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_FETCH_MEM_R64(r64Val, pIemCpu->iEffSeg, GCPtrEffSrc);
    /* Push only when the register that will become the new top (ST(7) now) is free. */
    IEM_MC_IF_FPUREG_IS_EMPTY(7)
        IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fld_r64_to_r80, pFpuRes, pr64Val);
        IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14893
14894
/** Opcode 0xdd !11/1.
 * FISTTP m64int (SSE3): store ST(0) to memory as a 64-bit integer using
 * truncation (round toward zero), then pop the FPU stack. */
FNIEMOP_DEF_1(iemOp_fisttp_m64i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisttp m64i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(int64_t *, pi64Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pi64Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fistt_r80_to_i64, pu16Fsw, pi64Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi64Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: with IM masked, store the integer indefinite value. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I64_CONST_BY_REF(pi64Dst, INT64_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi64Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14929
14930
/** Opcode 0xdd !11/2.
 * FST m64fp: store ST(0) to memory as a 64-bit real; the stack is not
 * popped. */
FNIEMOP_DEF_1(iemOp_fst_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fst m64r");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PRTFLOAT64U, pr64Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pr64Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r64, pu16Fsw, pr64Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr64Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: with IM masked, store negative QNaN (real indefinite). */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(pr64Dst);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pr64Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14965
14966
14967
14968
/** Opcode 0xdd !11/3.
 * FSTP m64fp: store ST(0) to memory as a 64-bit real, then pop the FPU
 * stack. Identical to FST m64fp except for the pop. */
FNIEMOP_DEF_1(iemOp_fstp_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fstp m64r");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PRTFLOAT64U, pr64Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pr64Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r64, pu16Fsw, pr64Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr64Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: with IM masked, store negative QNaN (real indefinite). */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(pr64Dst);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pr64Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15003
15004
15005/** Opcode 0xdd !11/0. */
15006FNIEMOP_DEF_1(iemOp_frstor, uint8_t, bRm)
15007{
15008 IEMOP_MNEMONIC("fxrstor m94/108byte");
15009 IEM_MC_BEGIN(3, 0);
15010 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0);
15011 IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pIemCpu->iEffSeg, 1);
15012 IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 2);
15013 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
15014 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
15015 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
15016 IEM_MC_CALL_CIMPL_3(iemCImpl_frstor, enmEffOpSize, iEffSeg, GCPtrEffSrc);
15017 IEM_MC_END();
15018 return VINF_SUCCESS;
15019}
15020
15021
/** Opcode 0xdd !11/6.
 * FNSAVE m94/108byte: save the complete FPU state to memory (no pending
 * exception check); the image size depends on the effective operand size,
 * hence both are deferred to the C implementation. */
FNIEMOP_DEF_1(iemOp_fnsave, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fnsave m94/108byte");
    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pIemCpu->iEffSeg, 1);
    IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_CALL_CIMPL_3(iemCImpl_fnsave, enmEffOpSize, iEffSeg, GCPtrEffDst);
    IEM_MC_END();
    return VINF_SUCCESS;

}
15038
/** Opcode 0xdd !11/7.
 * FNSTSW m16: store the FPU status word to memory (no pending exception
 * check). */
FNIEMOP_DEF_1(iemOp_fnstsw, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fnstsw m16");

    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(uint16_t, u16Tmp);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();

    IEM_MC_FETCH_FSW(u16Tmp);
    IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tmp);
    IEM_MC_ADVANCE_RIP();

/** @todo Debug / drop a hint to the verifier that things may differ
 * from REM. Seen 0x4020 (iem) vs 0x4000 (rem) at 0008:801c6b88 booting
 * NT4SP1. (X86_FSW_PE) */
    IEM_MC_END();
    return VINF_SUCCESS;
}
15062
15063
/** Opcode 0xdd 11/0.
 * FFREE ST(i): mark the given FPU register as empty without touching TOP. */
FNIEMOP_DEF_1(iemOp_ffree_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ffree stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    /* Note! C0, C1, C2 and C3 are documented as undefined, we leave them
             unmodified. */

    IEM_MC_BEGIN(0, 0);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_FPU_STACK_FREE(bRm & X86_MODRM_RM_MASK);
    IEM_MC_UPDATE_FPU_OPCODE_IP();

    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
15085
15086
/** Opcode 0xdd 11/2.
 * FST ST(i): copy the value in ST(0) into ST(i); the stack is not popped. */
FNIEMOP_DEF_1(iemOp_fst_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fst st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80Value);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        /* Wrap ST(0) in a result (FSW=0) and store it into the destination register. */
        IEM_MC_SET_FPU_RESULT(FpuRes, 0 /*FSW*/, pr80Value);
        IEM_MC_STORE_FPU_RESULT(FpuRes, bRm & X86_MODRM_RM_MASK);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(bRm & X86_MODRM_RM_MASK);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
15109
15110
15111/** Opcode 0xdd 11/3. */
15112FNIEMOP_DEF_1(iemOp_fucom_stN_st0, uint8_t, bRm)
15113{
15114 IEMOP_MNEMONIC("fcom st0,stN");
15115 return FNIEMOP_CALL_2(iemOpHlpFpuNoStore_st0_stN, bRm, iemAImpl_fucom_r80_by_r80);
15116}
15117
15118
15119/** Opcode 0xdd 11/4. */
15120FNIEMOP_DEF_1(iemOp_fucomp_stN, uint8_t, bRm)
15121{
15122 IEMOP_MNEMONIC("fcomp st0,stN");
15123 return FNIEMOP_CALL_2(iemOpHlpFpuNoStore_st0_stN_pop, bRm, iemAImpl_fucom_r80_by_r80);
15124}
15125
15126
/** Opcode 0xdd.
 * x87 escape 0xdd: register forms (FFREE/FST/FSTP/FUCOM/FUCOMP, mod=3) and
 * 64-bit real / FPU state memory forms (mod!=3). */
FNIEMOP_DEF(iemOp_EscF5)
{
    /* Record the offset of the escape opcode byte so the workers can update FOP. */
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register operand form, dispatched on the reg field of ModR/M. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_ffree_stN,   bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fxch_stN,    bRm); /* Reserved, intel behavior is that of XCHG ST(i). */
            case 2: return FNIEMOP_CALL_1(iemOp_fst_stN,     bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fstp_stN,    bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fucom_stN_st0,bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fucomp_stN,  bRm);
            case 6: return IEMOP_RAISE_INVALID_OPCODE();
            case 7: return IEMOP_RAISE_INVALID_OPCODE();
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory operand form. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fld_m64r,    bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fisttp_m64i, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fst_m64r,    bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fstp_m64r,   bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_frstor,      bRm);
            case 5: return IEMOP_RAISE_INVALID_OPCODE();
            case 6: return FNIEMOP_CALL_1(iemOp_fnsave,      bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fnstsw,      bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
15163
15164
/** Opcode 0xde 11/0.
 * FADDP ST(i),ST(0): add ST(0) to ST(i), store in ST(i), then pop. */
FNIEMOP_DEF_1(iemOp_faddp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("faddp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fadd_r80_by_r80);
}
15171
15172
/** Opcode 0xde 11/1.
 * FMULP ST(i),ST(0): multiply ST(i) by ST(0), store in ST(i), then pop. */
FNIEMOP_DEF_1(iemOp_fmulp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fmulp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fmul_r80_by_r80);
}
15179
15180
15181/** Opcode 0xde 0xd9. */
15182FNIEMOP_DEF(iemOp_fcompp)
15183{
15184 IEMOP_MNEMONIC("fucompp st0,stN");
15185 return FNIEMOP_CALL_1(iemOpHlpFpuNoStore_st0_stN_pop_pop, iemAImpl_fcom_r80_by_r80);
15186}
15187
15188
/** Opcode 0xde 11/4.
 * FSUBRP ST(i),ST(0): reverse subtract, store in ST(i), then pop. */
FNIEMOP_DEF_1(iemOp_fsubrp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubrp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fsubr_r80_by_r80);
}
15195
15196
/** Opcode 0xde 11/5.
 * FSUBP ST(i),ST(0): subtract ST(0) from ST(i), store in ST(i), then pop. */
FNIEMOP_DEF_1(iemOp_fsubp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fsub_r80_by_r80);
}
15203
15204
/** Opcode 0xde 11/6.
 * FDIVRP ST(i),ST(0): reverse divide, store in ST(i), then pop. */
FNIEMOP_DEF_1(iemOp_fdivrp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivrp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fdivr_r80_by_r80);
}
15211
15212
/** Opcode 0xde 11/7.
 * FDIVP ST(i),ST(0): divide ST(i) by ST(0), store in ST(i), then pop. */
FNIEMOP_DEF_1(iemOp_fdivp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fdiv_r80_by_r80);
}
15219
15220
/**
 * Common worker for FPU instructions working on ST0 and an m16i, and storing
 * the result in ST0.
 *
 * @param   bRm         The ModR/M byte (memory form, mod != 3).
 * @param   pfnAImpl    Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_2(iemOpHlpFpu_st0_m16i, uint8_t, bRm, PFNIEMAIMPLFPUI16, pfnAImpl)
{
    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_LOCAL(int16_t, i16Val2);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(int16_t const *, pi16Val2, i16Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I16(i16Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pi16Val2);
        IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15256
15257
/** Opcode 0xde !11/0.
 * FIADD m16int: add the 16-bit integer memory operand to ST(0). */
FNIEMOP_DEF_1(iemOp_fiadd_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fiadd m16i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fiadd_r80_by_i16);
}
15264
15265
/** Opcode 0xde !11/1.
 * FIMUL m16int: multiply ST(0) by the 16-bit integer memory operand. */
FNIEMOP_DEF_1(iemOp_fimul_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fimul m16i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fimul_r80_by_i16);
}
15272
15273
/** Opcode 0xde !11/2.
 * FICOM m16int: compare ST(0) with the 16-bit integer memory operand,
 * setting C0/C2/C3; the stack is not popped. */
FNIEMOP_DEF_1(iemOp_ficom_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ficom st0,m16i");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_LOCAL(int16_t, i16Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(int16_t const *, pi16Val2, i16Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I16(i16Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_ficom_r80_by_i16, pu16Fsw, pr80Value1, pi16Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15306
15307
/** Opcode 0xde !11/3.
 * FICOMP m16int: compare ST(0) with the 16-bit integer memory operand,
 * setting C0/C2/C3, then pop the stack. */
FNIEMOP_DEF_1(iemOp_ficomp_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ficomp st0,m16i");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_LOCAL(int16_t, i16Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(int16_t const *, pi16Val2, i16Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I16(i16Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_ficom_r80_by_i16, pu16Fsw, pr80Value1, pi16Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15340
15341
/** Opcode 0xde !11/4.
 * FISUB m16int: subtract the 16-bit integer memory operand from ST(0). */
FNIEMOP_DEF_1(iemOp_fisub_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisub m16i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fisub_r80_by_i16);
}
15348
15349
/** Opcode 0xde !11/5.
 * FISUBR m16int: subtract ST(0) from the 16-bit integer memory operand,
 * storing the result in ST(0). */
FNIEMOP_DEF_1(iemOp_fisubr_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisubr m16i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fisubr_r80_by_i16);
}
15356
15357
15358/** Opcode 0xde !11/6. */
15359FNIEMOP_DEF_1(iemOp_fidiv_m16i, uint8_t, bRm)
15360{
15361 IEMOP_MNEMONIC("fiadd m16i");
15362 return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fidiv_r80_by_i16);
15363}
15364
15365
15366/** Opcode 0xde !11/7. */
15367FNIEMOP_DEF_1(iemOp_fidivr_m16i, uint8_t, bRm)
15368{
15369 IEMOP_MNEMONIC("fiadd m16i");
15370 return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fidivr_r80_by_i16);
15371}
15372
15373
/** Opcode 0xde.
 * x87 escape 0xde: pop-variant arithmetic on ST(i) (mod=3) and 16-bit
 * integer memory forms (mod!=3). */
FNIEMOP_DEF(iemOp_EscF6)
{
    /* Record the offset of the escape opcode byte so the workers can update FOP. */
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register operand form, dispatched on the reg field of ModR/M. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_faddp_stN_st0, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fmulp_stN_st0, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcomp_stN, bRm);
            case 3: if (bRm == 0xd9)
                        return FNIEMOP_CALL(iemOp_fcompp);
                    /* Only DE D9 is defined in this group; all other rm values are invalid. */
                    return IEMOP_RAISE_INVALID_OPCODE();
            case 4: return FNIEMOP_CALL_1(iemOp_fsubrp_stN_st0, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fsubp_stN_st0, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fdivrp_stN_st0, bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fdivp_stN_st0, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* 16-bit integer memory operand form. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fiadd_m16i,  bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fimul_m16i,  bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_ficom_m16i,  bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_ficomp_m16i, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fisub_m16i,  bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fisubr_m16i, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fidiv_m16i,  bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fidivr_m16i, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
15412
15413
/** Opcode 0xdf 11/0.
 * Undocumented instruction, assumed to work like FFREE ST(i) followed by
 * FINCSTP (mark register empty, then increment TOP). */
FNIEMOP_DEF_1(iemOp_ffreep_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ffreep stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 0);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_FPU_STACK_FREE(bRm & X86_MODRM_RM_MASK);
    IEM_MC_FPU_STACK_INC_TOP();
    IEM_MC_UPDATE_FPU_OPCODE_IP();

    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
15435
15436
/** Opcode 0xdf 0xe0.
 * FNSTSW AX: copy the FPU status word into AX (no pending exception
 * check). */
FNIEMOP_DEF(iemOp_fnstsw_ax)
{
    IEMOP_MNEMONIC("fnstsw ax");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Tmp);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_FETCH_FSW(u16Tmp);
    IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
15452
15453
15454/** Opcode 0xdf 11/5. */
15455FNIEMOP_DEF_1(iemOp_fucomip_st0_stN, uint8_t, bRm)
15456{
15457 IEMOP_MNEMONIC("fcomip st0,stN");
15458 return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_fcomi_fucomi, bRm & X86_MODRM_RM_MASK, iemAImpl_fcomi_r80_by_r80, true /*fPop*/);
15459}
15460
15461
/** Opcode 0xdf 11/6.
 * FCOMIP ST(0),ST(i): ordered compare setting EFLAGS, then pop. */
FNIEMOP_DEF_1(iemOp_fcomip_st0_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcomip st0,stN");
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_fcomi_fucomi, bRm & X86_MODRM_RM_MASK, iemAImpl_fcomi_r80_by_r80, true /*fPop*/);
}
15468
15469
/** Opcode 0xdf !11/0.
 * FILD m16int -- not yet implemented; behavior is whatever FNIEMOP_STUB_1
 * provides for unimplemented opcodes. */
FNIEMOP_STUB_1(iemOp_fild_m16i, uint8_t, bRm);
15472
15473
/** Opcode 0xdf !11/1.
 * FISTTP m16int (SSE3): store ST(0) to memory as a 16-bit integer using
 * truncation (round toward zero), then pop the FPU stack. */
FNIEMOP_DEF_1(iemOp_fisttp_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisttp m16i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(int16_t *, pi16Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pi16Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fistt_r80_to_i16, pu16Fsw, pi16Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi16Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: with IM masked, store the integer indefinite value. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I16_CONST_BY_REF(pi16Dst, INT16_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi16Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15508
15509
15510/** Opcode 0xdf !11/2. */
15511FNIEMOP_DEF_1(iemOp_fist_m16i, uint8_t, bRm)
15512{
15513 IEMOP_MNEMONIC("fistp m16i");
15514 IEM_MC_BEGIN(3, 2);
15515 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
15516 IEM_MC_LOCAL(uint16_t, u16Fsw);
15517 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
15518 IEM_MC_ARG(int16_t *, pi16Dst, 1);
15519 IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
15520
15521 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
15522 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
15523 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
15524 IEM_MC_MAYBE_RAISE_FPU_XCPT();
15525
15526 IEM_MC_MEM_MAP(pi16Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
15527 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
15528 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i16, pu16Fsw, pi16Dst, pr80Value);
15529 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi16Dst, IEM_ACCESS_DATA_W, u16Fsw);
15530 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
15531 IEM_MC_ELSE()
15532 IEM_MC_IF_FCW_IM()
15533 IEM_MC_STORE_MEM_I16_CONST_BY_REF(pi16Dst, INT16_MIN /* (integer indefinite) */);
15534 IEM_MC_MEM_COMMIT_AND_UNMAP(pi16Dst, IEM_ACCESS_DATA_W);
15535 IEM_MC_ENDIF();
15536 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
15537 IEM_MC_ENDIF();
15538 IEM_MC_USED_FPU();
15539 IEM_MC_ADVANCE_RIP();
15540
15541 IEM_MC_END();
15542 return VINF_SUCCESS;
15543}
15544
15545
/** Opcode 0xdf !11/3.
 * FISTP m16int: store ST(0) to memory as a 16-bit integer (rounding per
 * FCW), then pop the FPU stack. */
FNIEMOP_DEF_1(iemOp_fistp_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fistp m16i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(int16_t *, pi16Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pi16Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i16, pu16Fsw, pi16Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi16Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: with IM masked, store the integer indefinite value. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I16_CONST_BY_REF(pi16Dst, INT16_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi16Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15580
15581
/** Opcode 0xdf !11/4.
 * FBLD m80bcd -- not yet implemented. */
FNIEMOP_STUB_1(iemOp_fbld_m80d, uint8_t, bRm);

/** Opcode 0xdf !11/5.
 * FILD m64int -- not yet implemented. */
FNIEMOP_STUB_1(iemOp_fild_m64i, uint8_t, bRm);

/** Opcode 0xdf !11/6.
 * FBSTP m80bcd -- not yet implemented. */
FNIEMOP_STUB_1(iemOp_fbstp_m80d, uint8_t, bRm);
15590
15591
/** Opcode 0xdf !11/7.
 * FISTP m64int: store ST(0) to memory as a 64-bit integer (rounding per
 * FCW), then pop the FPU stack. */
FNIEMOP_DEF_1(iemOp_fistp_m64i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fistp m64i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(int64_t *, pi64Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pi64Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i64, pu16Fsw, pi64Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi64Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: with IM masked, store the integer indefinite value. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I64_CONST_BY_REF(pi64Dst, INT64_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi64Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_USED_FPU();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15626
15627
15628/** Opcode 0xdf. */
15629FNIEMOP_DEF(iemOp_EscF7)
15630{
15631 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
15632 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
15633 {
15634 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
15635 {
15636 case 0: return FNIEMOP_CALL_1(iemOp_ffreep_stN, bRm); /* ffree + pop afterwards, since forever according to AMD. */
15637 case 1: return FNIEMOP_CALL_1(iemOp_fxch_stN, bRm); /* Reserved, behaves like FXCH ST(i) on intel. */
15638 case 2: return FNIEMOP_CALL_1(iemOp_fstp_stN, bRm); /* Reserved, behaves like FSTP ST(i) on intel. */
15639 case 3: return FNIEMOP_CALL_1(iemOp_fstp_stN, bRm); /* Reserved, behaves like FSTP ST(i) on intel. */
15640 case 4: if (bRm == 0xe0)
15641 return FNIEMOP_CALL(iemOp_fnstsw_ax);
15642 return IEMOP_RAISE_INVALID_OPCODE();
15643 case 5: return FNIEMOP_CALL_1(iemOp_fucomip_st0_stN, bRm);
15644 case 6: return FNIEMOP_CALL_1(iemOp_fcomip_st0_stN, bRm);
15645 case 7: return IEMOP_RAISE_INVALID_OPCODE();
15646 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15647 }
15648 }
15649 else
15650 {
15651 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
15652 {
15653 case 0: return FNIEMOP_CALL_1(iemOp_fild_m16i, bRm);
15654 case 1: return FNIEMOP_CALL_1(iemOp_fisttp_m16i, bRm);
15655 case 2: return FNIEMOP_CALL_1(iemOp_fist_m16i, bRm);
15656 case 3: return FNIEMOP_CALL_1(iemOp_fistp_m16i, bRm);
15657 case 4: return FNIEMOP_CALL_1(iemOp_fbld_m80d, bRm);
15658 case 5: return FNIEMOP_CALL_1(iemOp_fild_m64i, bRm);
15659 case 6: return FNIEMOP_CALL_1(iemOp_fbstp_m80d, bRm);
15660 case 7: return FNIEMOP_CALL_1(iemOp_fistp_m64i, bRm);
15661 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15662 }
15663 }
15664}
15665
15666
/** Opcode 0xe0.
 * LOOPNE/LOOPNZ Jb: decrement the counter register (CX/ECX/RCX, selected by
 * the effective address size) and branch if it is non-zero AND ZF is clear. */
FNIEMOP_DEF(iemOp_loopne_Jb)
{
    IEMOP_MNEMONIC("loopne Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
            IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
            IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
            IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
15713
15714
/** Opcode 0xe1.
 * LOOPE/LOOPZ Jb: decrement the counter register (CX/ECX/RCX, selected by
 * the effective address size) and branch if it is non-zero AND ZF is set. */
FNIEMOP_DEF(iemOp_loope_Jb)
{
    IEMOP_MNEMONIC("loope Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
            IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
            IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
            IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
15761
15762
/** Opcode 0xe2.
 * LOOP Jb: decrement the counter register (CX/ECX/RCX, selected by the
 * effective address size) and branch if it is non-zero. A branch that
 * targets the LOOP instruction itself (i8Imm equal to minus the instruction
 * length) is short-circuited by clearing the counter and falling through. */
FNIEMOP_DEF(iemOp_loop_Jb)
{
    IEMOP_MNEMONIC("loop Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    /** @todo Check out the #GP case if EIP < CS.Base or EIP > CS.Limit when
     * using the 32-bit operand size override.  How can that be restarted?  See
     * weird pseudo code in intel manual. */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            /* Loop-to-self: only terminates when CX hits zero, so skip ahead. */
            if (-(int8_t)pIemCpu->offOpcode != i8Imm)
            {
                IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
                IEM_MC_IF_CX_IS_NZ() {
                    IEM_MC_REL_JMP_S8(i8Imm);
                } IEM_MC_ELSE() {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ENDIF();
            }
            else
            {
                IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xCX, 0);
                IEM_MC_ADVANCE_RIP();
            }
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            /* Loop-to-self: only terminates when ECX hits zero, so skip ahead. */
            if (-(int8_t)pIemCpu->offOpcode != i8Imm)
            {
                IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
                IEM_MC_IF_ECX_IS_NZ() {
                    IEM_MC_REL_JMP_S8(i8Imm);
                } IEM_MC_ELSE() {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ENDIF();
            }
            else
            {
                IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xCX, 0);
                IEM_MC_ADVANCE_RIP();
            }
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            /* Loop-to-self: only terminates when RCX hits zero, so skip ahead. */
            if (-(int8_t)pIemCpu->offOpcode != i8Imm)
            {
                IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
                IEM_MC_IF_RCX_IS_NZ() {
                    IEM_MC_REL_JMP_S8(i8Imm);
                } IEM_MC_ELSE() {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ENDIF();
            }
            else
            {
                IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xCX, 0);
                IEM_MC_ADVANCE_RIP();
            }
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
15836
15837
/** Opcode 0xe3. */
FNIEMOP_DEF(iemOp_jecxz_Jb)
{
    /*
     * JCXZ/JECXZ/JRCXZ rel8: jump when the counter register is zero.  Which
     * register is tested depends on the effective *address* size, not the
     * operand size.  Note the inverted IF below: non-zero counter falls
     * through (advance RIP), zero counter takes the branch.
     */
    IEMOP_MNEMONIC("jecxz Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:     /* jcxz */
            IEM_MC_BEGIN(0,0);
            IEM_MC_IF_CX_IS_NZ() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ELSE() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:     /* jecxz */
            IEM_MC_BEGIN(0,0);
            IEM_MC_IF_ECX_IS_NZ() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ELSE() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:     /* jrcxz */
            IEM_MC_BEGIN(0,0);
            IEM_MC_IF_RCX_IS_NZ() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ELSE() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
15881
15882
15883/** Opcode 0xe4 */
15884FNIEMOP_DEF(iemOp_in_AL_Ib)
15885{
15886 IEMOP_MNEMONIC("in eAX,Ib");
15887 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
15888 IEMOP_HLP_NO_LOCK_PREFIX();
15889 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, 1);
15890}
15891
15892
/** Opcode 0xe5 */
FNIEMOP_DEF(iemOp_in_eAX_Ib)
{
    /*
     * IN eAX, imm8: read a word/dword from the immediate I/O port into
     * AX/EAX; the access size (2 or 4) follows the effective operand size.
     */
    IEMOP_MNEMONIC("in eAX,Ib");
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
15901
15902
/** Opcode 0xe6 */
FNIEMOP_DEF(iemOp_out_Ib_AL)
{
    /* OUT imm8, AL: write the AL byte to the immediate I/O port. */
    IEMOP_MNEMONIC("out Ib,AL");
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, 1);
}
15911
15912
/** Opcode 0xe7 */
FNIEMOP_DEF(iemOp_out_Ib_eAX)
{
    /*
     * OUT imm8, eAX: write AX/EAX to the immediate I/O port; access size
     * (2 or 4) follows the effective operand size.
     */
    IEMOP_MNEMONIC("out Ib,eAX");
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
15921
15922
/** Opcode 0xe8. */
FNIEMOP_DEF(iemOp_call_Jv)
{
    /*
     * CALL rel16/rel32: near relative call.  The immediate is fetched per
     * operand size; in 64-bit mode only a 32-bit displacement exists and is
     * sign-extended to 64 bits (IEM_OPCODE_GET_NEXT_S32_SX_U64).  The stack
     * push and RIP update are done by the C implementations.
     */
    IEMOP_MNEMONIC("call Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_16, (int16_t)u16Imm);
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_32, (int32_t)u32Imm);
        }

        case IEMMODE_64BIT:
        {
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_64, u64Imm);
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
15951
15952
/** Opcode 0xe9. */
FNIEMOP_DEF(iemOp_jmp_Jv)
{
    /*
     * JMP rel16/rel32: near relative jump.  The 64-bit case shares the
     * 32-bit path since the displacement is always at most 32 bits; the
     * sign extension to 64 bits happens inside IEM_MC_REL_JMP_S32.
     */
    IEMOP_MNEMONIC("jmp Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
            IEM_MC_BEGIN(0, 0);
            IEM_MC_REL_JMP_S16(i16Imm);
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:     /* same 32-bit displacement as below */
        case IEMMODE_32BIT:
        {
            int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
            IEM_MC_BEGIN(0, 0);
            IEM_MC_REL_JMP_S32(i32Imm);
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
15982
15983
/** Opcode 0xea. */
FNIEMOP_DEF(iemOp_jmp_Ap)
{
    /*
     * JMP ptr16:16 / ptr16:32: direct far jump.  Invalid in 64-bit mode
     * (IEMOP_HLP_NO_64BIT).  The offset is decoded first (16 bits
     * zero-extended, or 32 bits, per operand size), then the selector.
     */
    IEMOP_MNEMONIC("jmp Ap");
    IEMOP_HLP_NO_64BIT();

    /* Decode the far pointer address and pass it on to the far call C implementation. */
    uint32_t offSeg;
    if (pIemCpu->enmEffOpSize != IEMMODE_16BIT)
        IEM_OPCODE_GET_NEXT_U32(&offSeg);
    else
        IEM_OPCODE_GET_NEXT_U16_ZX_U32(&offSeg);
    uint16_t uSel;  IEM_OPCODE_GET_NEXT_U16(&uSel);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_FarJmp, uSel, offSeg, pIemCpu->enmEffOpSize);
}
16000
16001
/** Opcode 0xeb. */
FNIEMOP_DEF(iemOp_jmp_Jb)
{
    /* JMP rel8: short relative jump, unconditional. */
    IEMOP_MNEMONIC("jmp Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_REL_JMP_S8(i8Imm);
    IEM_MC_END();
    return VINF_SUCCESS;
}
16015
16016
/** Opcode 0xec */
FNIEMOP_DEF(iemOp_in_AL_DX)
{
    /* IN AL, DX: read one byte from the port in DX into AL. */
    IEMOP_MNEMONIC("in AL,DX");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, 1);
}
16024
16025
/** Opcode 0xed */
FNIEMOP_DEF(iemOp_eAX_DX)
{
    /*
     * IN eAX, DX: read a word/dword from the port in DX into AX/EAX.
     * NOTE(review): the function name is missing the "in_" part (compare
     * iemOp_in_AL_DX above) - cosmetic only, it is wired up via the
     * one-byte opcode table; renaming would touch that table.
     */
    IEMOP_MNEMONIC("in eAX,DX");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
16033
16034
/** Opcode 0xee */
FNIEMOP_DEF(iemOp_out_DX_AL)
{
    /* OUT DX, AL: write the AL byte to the port in DX. */
    IEMOP_MNEMONIC("out DX,AL");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, 1);
}
16042
16043
/** Opcode 0xef */
FNIEMOP_DEF(iemOp_out_DX_eAX)
{
    /* OUT DX, eAX: write AX/EAX to the port in DX, size per operand size. */
    IEMOP_MNEMONIC("out DX,eAX");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
16051
16052
/** Opcode 0xf0. */
FNIEMOP_DEF(iemOp_lock)
{
    /*
     * LOCK prefix: record the prefix in fPrefixes and recurse into the
     * one-byte opcode table to decode the prefixed instruction.
     */
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("lock");
    pIemCpu->fPrefixes |= IEM_OP_PRF_LOCK;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
16062
16063
/** Opcode 0xf2. */
FNIEMOP_DEF(iemOp_repne)
{
    /*
     * REPNE/REPNZ prefix: mutually exclusive with REPE, so clear that flag
     * before setting REPNZ, then recurse to decode the next opcode byte.
     */
    /* This overrides any previous REPE prefix. */
    pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPZ;
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("repne");
    pIemCpu->fPrefixes |= IEM_OP_PRF_REPNZ;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
16075
16076
/** Opcode 0xf3. */
FNIEMOP_DEF(iemOp_repe)
{
    /*
     * REP/REPE/REPZ prefix: mutually exclusive with REPNE, so clear that
     * flag before setting REPZ, then recurse to decode the next opcode byte.
     */
    /* This overrides any previous REPNE prefix. */
    pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPNZ;
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("repe");
    pIemCpu->fPrefixes |= IEM_OP_PRF_REPZ;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
16088
16089
/** Opcode 0xf4. */
FNIEMOP_DEF(iemOp_hlt)
{
    /* HLT: deferred to the C implementation (privilege check + halt state).
       NOTE(review): no IEMOP_MNEMONIC() call here unlike the neighbouring
       handlers - affects debug logging only. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
}
16096
16097
/** Opcode 0xf5. */
FNIEMOP_DEF(iemOp_cmc)
{
    /* CMC: complement (toggle) the carry flag. */
    IEMOP_MNEMONIC("cmc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_FLIP_EFL_BIT(X86_EFL_CF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
16109
16110
16111/**
16112 * Common implementation of 'inc/dec/not/neg Eb'.
16113 *
16114 * @param bRm The RM byte.
16115 * @param pImpl The instruction implementation.
16116 */
16117FNIEMOP_DEF_2(iemOpCommonUnaryEb, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
16118{
16119 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
16120 {
16121 /* register access */
16122 IEM_MC_BEGIN(2, 0);
16123 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
16124 IEM_MC_ARG(uint32_t *, pEFlags, 1);
16125 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
16126 IEM_MC_REF_EFLAGS(pEFlags);
16127 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
16128 IEM_MC_ADVANCE_RIP();
16129 IEM_MC_END();
16130 }
16131 else
16132 {
16133 /* memory access. */
16134 IEM_MC_BEGIN(2, 2);
16135 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
16136 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
16137 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
16138
16139 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
16140 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
16141 IEM_MC_FETCH_EFLAGS(EFlags);
16142 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
16143 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
16144 else
16145 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU8, pu8Dst, pEFlags);
16146
16147 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
16148 IEM_MC_COMMIT_EFLAGS(EFlags);
16149 IEM_MC_ADVANCE_RIP();
16150 IEM_MC_END();
16151 }
16152 return VINF_SUCCESS;
16153}
16154
16155
16156/**
16157 * Common implementation of 'inc/dec/not/neg Ev'.
16158 *
16159 * @param bRm The RM byte.
16160 * @param pImpl The instruction implementation.
16161 */
16162FNIEMOP_DEF_2(iemOpCommonUnaryEv, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
16163{
16164 /* Registers are handled by a common worker. */
16165 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
16166 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, pImpl, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
16167
16168 /* Memory we do here. */
16169 switch (pIemCpu->enmEffOpSize)
16170 {
16171 case IEMMODE_16BIT:
16172 IEM_MC_BEGIN(2, 2);
16173 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
16174 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
16175 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
16176
16177 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
16178 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
16179 IEM_MC_FETCH_EFLAGS(EFlags);
16180 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
16181 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
16182 else
16183 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU16, pu16Dst, pEFlags);
16184
16185 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
16186 IEM_MC_COMMIT_EFLAGS(EFlags);
16187 IEM_MC_ADVANCE_RIP();
16188 IEM_MC_END();
16189 return VINF_SUCCESS;
16190
16191 case IEMMODE_32BIT:
16192 IEM_MC_BEGIN(2, 2);
16193 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
16194 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
16195 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
16196
16197 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
16198 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
16199 IEM_MC_FETCH_EFLAGS(EFlags);
16200 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
16201 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
16202 else
16203 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU32, pu32Dst, pEFlags);
16204
16205 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
16206 IEM_MC_COMMIT_EFLAGS(EFlags);
16207 IEM_MC_ADVANCE_RIP();
16208 IEM_MC_END();
16209 return VINF_SUCCESS;
16210
16211 case IEMMODE_64BIT:
16212 IEM_MC_BEGIN(2, 2);
16213 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
16214 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
16215 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
16216
16217 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
16218 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
16219 IEM_MC_FETCH_EFLAGS(EFlags);
16220 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
16221 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
16222 else
16223 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU64, pu64Dst, pEFlags);
16224
16225 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
16226 IEM_MC_COMMIT_EFLAGS(EFlags);
16227 IEM_MC_ADVANCE_RIP();
16228 IEM_MC_END();
16229 return VINF_SUCCESS;
16230
16231 IEM_NOT_REACHED_DEFAULT_CASE_RET();
16232 }
16233}
16234
16235
/** Opcode 0xf6 /0. */
FNIEMOP_DEF_1(iemOp_grp3_test_Eb, uint8_t, bRm)
{
    /*
     * TEST Eb, Ib: AND without writeback - only EFLAGS are updated, so the
     * memory operand is mapped read-only (IEM_ACCESS_DATA_R).
     */
    IEMOP_MNEMONIC("test Eb,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,       pu8Dst,             0);
        IEM_MC_ARG_CONST(uint8_t,   u8Src,/*=*/u8Imm,   1);
        IEM_MC_ARG(uint32_t *,      pEFlags,            2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory access. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,       pu8Dst,          0);
        IEM_MC_ARG(uint8_t,         u8Src,           1);
        IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

        /* The trailing '1' tells the effective-address calc that one more
           immediate byte follows the ModR/M bytes (for RIP-relative). */
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEM_MC_ASSIGN(u8Src, u8Imm);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_R);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
16283
16284
/** Opcode 0xf7 /0. */
FNIEMOP_DEF_1(iemOp_grp3_test_Ev, uint8_t, bRm)
{
    /*
     * TEST Ev, Iv: AND without writeback, per effective operand size.  Only
     * EFLAGS change, so memory operands are mapped read-only.  The 64-bit
     * immediate is a sign-extended 32-bit value, as per the architecture.
     */
    IEMOP_MNEMONIC("test Ev,Iv");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *,      pu16Dst,                0);
                IEM_MC_ARG_CONST(uint16_t,  u16Src,/*=*/u16Imm,     1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *,      pu32Dst,                0);
                IEM_MC_ARG_CONST(uint32_t,  u32Src,/*=*/u32Imm,     1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);
                /* No clearing the high dword here - test doesn't write back the result. */
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *,      pu64Dst,                0);
                IEM_MC_ARG_CONST(uint64_t,  u64Src,/*=*/u64Imm,     1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory access. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *,      pu16Dst,         0);
                IEM_MC_ARG(uint16_t,        u16Src,          1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

                /* cbImm = 2: a word immediate follows the ModR/M bytes. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_ASSIGN(u16Src, u16Imm);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_R);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *,      pu32Dst,         0);
                IEM_MC_ARG(uint32_t,        u32Src,          1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

                /* cbImm = 4: a dword immediate follows the ModR/M bytes. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_ASSIGN(u32Src, u32Imm);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_R);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *,      pu64Dst,         0);
                IEM_MC_ARG(uint64_t,        u64Src,          1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

                /* cbImm = 4: even with a 64-bit operand the immediate is a
                   sign-extended dword. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_ASSIGN(u64Src, u64Imm);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_R);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
16421
16422
/** Opcode 0xf6 /4, /5, /6 and /7. */
FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEb, uint8_t, bRm, PFNIEMAIMPLMULDIVU8, pfnU8)
{
    /*
     * Common byte-sized MUL/IMUL/DIV/IDIV worker.  The assembly worker
     * operates on AX (implicit destination) and the Eb operand; a non-zero
     * return from the worker means divide error, raised as #DE.
     */
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 1);
        IEM_MC_ARG(uint16_t *,      pu16AX,  0);
        IEM_MC_ARG(uint8_t,         u8Value, 1);
        IEM_MC_ARG(uint32_t *,      pEFlags, 2);
        IEM_MC_LOCAL(int32_t,       rc);

        IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_AIMPL_3(rc, pfnU8, pu16AX, u8Value, pEFlags);
        /* rc == 0: success; otherwise raise #DE (divide by zero/overflow). */
        IEM_MC_IF_LOCAL_IS_Z(rc) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_RAISE_DIVIDE_ERROR();
        } IEM_MC_ENDIF();

        IEM_MC_END();
    }
    else
    {
        /* memory access. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint16_t *,      pu16AX,  0);
        IEM_MC_ARG(uint8_t,         u8Value, 1);
        IEM_MC_ARG(uint32_t *,      pEFlags, 2);
        IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
        IEM_MC_LOCAL(int32_t,       rc);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_AIMPL_3(rc, pfnU8, pu16AX, u8Value, pEFlags);
        /* rc == 0: success; otherwise raise #DE. */
        IEM_MC_IF_LOCAL_IS_Z(rc) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_RAISE_DIVIDE_ERROR();
        } IEM_MC_ENDIF();

        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
16477
16478
/** Opcode 0xf7 /4, /5, /6 and /7. */
FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEv, uint8_t, bRm, PCIEMOPMULDIVSIZES, pImpl)
{
    /*
     * Common word/dword/qword MUL/IMUL/DIV/IDIV worker.  The assembly
     * workers take AX/EAX/RAX and DX/EDX/RDX by reference plus the Ev
     * operand; a non-zero return means divide error, raised as #DE.
     * The 32-bit paths clear the high halves of RAX/RDX on success, as
     * required for 32-bit GPR writes in long mode.
     */
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint16_t *,      pu16AX,   0);
                IEM_MC_ARG(uint16_t *,      pu16DX,   1);
                IEM_MC_ARG(uint16_t,        u16Value, 2);
                IEM_MC_ARG(uint32_t *,      pEFlags,  3);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint32_t *,      pu32AX,   0);
                IEM_MC_ARG(uint32_t *,      pu32DX,   1);
                IEM_MC_ARG(uint32_t,        u32Value, 2);
                IEM_MC_ARG(uint32_t *,      pEFlags,  3);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    /* 32-bit writes zero the upper halves of RAX/RDX. */
                    IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32AX);
                    IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32DX);
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint64_t *,      pu64AX,   0);
                IEM_MC_ARG(uint64_t *,      pu64DX,   1);
                IEM_MC_ARG(uint64_t,        u64Value, 2);
                IEM_MC_ARG(uint32_t *,      pEFlags,  3);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory access. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint16_t *,      pu16AX,   0);
                IEM_MC_ARG(uint16_t *,      pu16DX,   1);
                IEM_MC_ARG(uint16_t,        u16Value, 2);
                IEM_MC_ARG(uint32_t *,      pEFlags,  3);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint32_t *,      pu32AX,   0);
                IEM_MC_ARG(uint32_t *,      pu32DX,   1);
                IEM_MC_ARG(uint32_t,        u32Value, 2);
                IEM_MC_ARG(uint32_t *,      pEFlags,  3);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    /* 32-bit writes zero the upper halves of RAX/RDX. */
                    IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32AX);
                    IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32DX);
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint64_t *,      pu64AX,   0);
                IEM_MC_ARG(uint64_t *,      pu64DX,   1);
                IEM_MC_ARG(uint64_t,        u64Value, 2);
                IEM_MC_ARG(uint32_t *,      pEFlags,  3);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
16662
/** Opcode 0xf6. */
FNIEMOP_DEF(iemOp_Grp3_Eb)
{
    /*
     * Group 3 byte-size dispatcher: the reg field of ModR/M selects the
     * operation (/0 test, /1 invalid, /2 not, /3 neg, /4-/7 mul/imul/
     * div/idiv via the common worker).
     */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            return FNIEMOP_CALL_1(iemOp_grp3_test_Eb, bRm);
        case 1:
            /* /1 is undefined for group 3. */
            return IEMOP_RAISE_INVALID_OPCODE();
        case 2:
            IEMOP_MNEMONIC("not Eb");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_not);
        case 3:
            IEMOP_MNEMONIC("neg Eb");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_neg);
        case 4:
            IEMOP_MNEMONIC("mul Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_mul_u8);
        case 5:
            IEMOP_MNEMONIC("imul Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_imul_u8);
        case 6:
            IEMOP_MNEMONIC("div Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_div_u8);
        case 7:
            IEMOP_MNEMONIC("idiv Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_idiv_u8);
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
16698
16699
/** Opcode 0xf7. */
FNIEMOP_DEF(iemOp_Grp3_Ev)
{
    /*
     * Group 3 word/dword/qword dispatcher: the reg field of ModR/M selects
     * the operation (/0 test, /1 invalid, /2 not, /3 neg, /4-/7 mul/imul/
     * div/idiv via the common worker).
     */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            return FNIEMOP_CALL_1(iemOp_grp3_test_Ev, bRm);
        case 1:
            /* /1 is undefined for group 3. */
            return IEMOP_RAISE_INVALID_OPCODE();
        case 2:
            IEMOP_MNEMONIC("not Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_not);
        case 3:
            IEMOP_MNEMONIC("neg Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_neg);
        case 4:
            IEMOP_MNEMONIC("mul Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_mul);
        case 5:
            IEMOP_MNEMONIC("imul Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_imul);
        case 6:
            IEMOP_MNEMONIC("div Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_div);
        case 7:
            IEMOP_MNEMONIC("idiv Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_idiv);
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
16735
16736
/** Opcode 0xf8. */
FNIEMOP_DEF(iemOp_clc)
{
    /* CLC: clear the carry flag. */
    IEMOP_MNEMONIC("clc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_CLEAR_EFL_BIT(X86_EFL_CF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
16748
16749
/** Opcode 0xf9. */
FNIEMOP_DEF(iemOp_stc)
{
    /* STC: set the carry flag. */
    IEMOP_MNEMONIC("stc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_SET_EFL_BIT(X86_EFL_CF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
16761
16762
/** Opcode 0xfa. */
FNIEMOP_DEF(iemOp_cli)
{
    /* CLI: deferred to the C implementation (IOPL/VME privilege checks). */
    IEMOP_MNEMONIC("cli");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cli);
}
16770
16771
/** Opcode 0xfb. */
FNIEMOP_DEF(iemOp_sti)
{
    /* STI: deferred to the C implementation (privilege checks, interrupt
       shadow).  The opcode banner comment above was missing. */
    IEMOP_MNEMONIC("sti");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sti);
}
16778
16779
/** Opcode 0xfc. */
FNIEMOP_DEF(iemOp_cld)
{
    /* CLD: clear the direction flag (string ops go forward). */
    IEMOP_MNEMONIC("cld");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_CLEAR_EFL_BIT(X86_EFL_DF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
16791
16792
/** Opcode 0xfd. */
FNIEMOP_DEF(iemOp_std)
{
    /* STD: set the direction flag (string ops go backward). */
    IEMOP_MNEMONIC("std");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_SET_EFL_BIT(X86_EFL_DF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
16804
16805
16806/** Opcode 0xfe. */
16807FNIEMOP_DEF(iemOp_Grp4)
16808{
16809 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
16810 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
16811 {
16812 case 0:
16813 IEMOP_MNEMONIC("inc Ev");
16814 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_inc);
16815 case 1:
16816 IEMOP_MNEMONIC("dec Ev");
16817 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_dec);
16818 default:
16819 IEMOP_MNEMONIC("grp4-ud");
16820 return IEMOP_RAISE_INVALID_OPCODE();
16821 }
16822}
16823
16824
/**
 * Opcode 0xff /2.
 *
 * Near indirect CALL: the target RIP comes from a register or memory operand.
 *
 * @param   bRm     The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_calln_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("calln Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* The new RIP is taken from a register. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(1, 0);
                IEM_MC_ARG(uint16_t, u16Target, 0);
                IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(1, 0);
                IEM_MC_ARG(uint32_t, u32Target, 0);
                IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(1, 0);
                IEM_MC_ARG(uint64_t, u64Target, 0);
                IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* The new RIP is taken from a memory location. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(1, 1);
                IEM_MC_ARG(uint16_t, u16Target, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(1, 1);
                IEM_MC_ARG(uint32_t, u32Target, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(1, 1);
                IEM_MC_ARG(uint64_t, u64Target, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
16906
16907typedef IEM_CIMPL_DECL_TYPE_3(FNIEMCIMPLFARBRANCH, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmOpSize);
16908
16909FNIEMOP_DEF_2(iemOpHlp_Grp5_far_Ep, uint8_t, bRm, FNIEMCIMPLFARBRANCH *, pfnCImpl)
16910{
16911 /* Registers? How?? */
16912 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
16913 return IEMOP_RAISE_INVALID_OPCODE(); /* callf eax is not legal */
16914
16915 /* Far pointer loaded from memory. */
16916 switch (pIemCpu->enmEffOpSize)
16917 {
16918 case IEMMODE_16BIT:
16919 IEM_MC_BEGIN(3, 1);
16920 IEM_MC_ARG(uint16_t, u16Sel, 0);
16921 IEM_MC_ARG(uint16_t, offSeg, 1);
16922 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
16923 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
16924 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
16925 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
16926 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
16927 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 2);
16928 IEM_MC_CALL_CIMPL_3(pfnCImpl, u16Sel, offSeg, enmEffOpSize);
16929 IEM_MC_END();
16930 return VINF_SUCCESS;
16931
16932 case IEMMODE_64BIT:
16933 /** @todo testcase: AMD does not seem to believe in the case (see bs-cpu-xcpt-1)
16934 * and will apparently ignore REX.W, at least for the jmp far qword [rsp]
16935 * and call far qword [rsp] encodings. */
16936 if (!IEM_IS_GUEST_CPU_AMD(pIemCpu))
16937 {
16938 IEM_MC_BEGIN(3, 1);
16939 IEM_MC_ARG(uint16_t, u16Sel, 0);
16940 IEM_MC_ARG(uint64_t, offSeg, 1);
16941 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
16942 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
16943 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
16944 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
16945 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
16946 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 8);
16947 IEM_MC_CALL_CIMPL_3(pfnCImpl, u16Sel, offSeg, enmEffOpSize);
16948 IEM_MC_END();
16949 return VINF_SUCCESS;
16950 }
16951 /* AMD falls thru. */
16952
16953 case IEMMODE_32BIT:
16954 IEM_MC_BEGIN(3, 1);
16955 IEM_MC_ARG(uint16_t, u16Sel, 0);
16956 IEM_MC_ARG(uint32_t, offSeg, 1);
16957 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_32BIT, 2);
16958 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
16959 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
16960 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
16961 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
16962 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 4);
16963 IEM_MC_CALL_CIMPL_3(pfnCImpl, u16Sel, offSeg, enmEffOpSize);
16964 IEM_MC_END();
16965 return VINF_SUCCESS;
16966
16967 IEM_NOT_REACHED_DEFAULT_CASE_RET();
16968 }
16969}
16970
16971
/**
 * Opcode 0xff /3.
 *
 * Far indirect CALL; delegates the far pointer decoding to the common
 * far-branch worker with the CALLF C implementation.
 *
 * @param   bRm     The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_callf_Ep, uint8_t, bRm)
{
    IEMOP_MNEMONIC("callf Ep");
    return FNIEMOP_CALL_2(iemOpHlp_Grp5_far_Ep, bRm, iemCImpl_callf);
}
16981
16982
/**
 * Opcode 0xff /4.
 *
 * Near indirect JMP: the target RIP comes from a register or memory operand.
 *
 * @param   bRm     The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_jmpn_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("jmpn Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* The new RIP is taken from a register. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Target);
                IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_SET_RIP_U16(u16Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Target);
                IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_SET_RIP_U32(u32Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Target);
                IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_SET_RIP_U64(u64Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* The new RIP is taken from a memory location. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Target);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_SET_RIP_U16(u16Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Target);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_SET_RIP_U32(u32Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Target);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_SET_RIP_U64(u64Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
17064
17065
/**
 * Opcode 0xff /5.
 *
 * Far indirect JMP; delegates the far pointer decoding to the common
 * far-branch worker with the far-jump C implementation.
 *
 * @param   bRm     The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_jmpf_Ep, uint8_t, bRm)
{
    IEMOP_MNEMONIC("jmpf Ep");
    return FNIEMOP_CALL_2(iemOpHlp_Grp5_far_Ep, bRm, iemCImpl_FarJmp);
}
17075
17076
/**
 * Opcode 0xff /6.
 *
 * PUSH with a register or memory source operand.
 *
 * @param   bRm     The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_push_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("push Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */

    /* Registers are handled by a common worker. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return FNIEMOP_CALL_1(iemOpCommonPushGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);

    /* Memory we do here. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint16_t,  u16Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
            IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U16(u16Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t,  u32Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
            IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U32(u32Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t,  u64Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
            IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U64(u64Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
17130
17131
/** Opcode 0xff.
 *
 * Group 5: dispatches on the reg field of the ModR/M byte; /7 is \#UD.
 */
FNIEMOP_DEF(iemOp_Grp5)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            IEMOP_MNEMONIC("inc Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_inc);
        case 1:
            IEMOP_MNEMONIC("dec Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_dec);
        case 2:
            return FNIEMOP_CALL_1(iemOp_Grp5_calln_Ev, bRm);
        case 3:
            return FNIEMOP_CALL_1(iemOp_Grp5_callf_Ep, bRm);
        case 4:
            return FNIEMOP_CALL_1(iemOp_Grp5_jmpn_Ev, bRm);
        case 5:
            return FNIEMOP_CALL_1(iemOp_Grp5_jmpf_Ep, bRm);
        case 6:
            return FNIEMOP_CALL_1(iemOp_Grp5_push_Ev, bRm);
        case 7:
            IEMOP_MNEMONIC("grp5-ud");
            return IEMOP_RAISE_INVALID_OPCODE();
    }
    /* Unreachable: the switch above covers all eight reg values. */
    AssertFailedReturn(VERR_INTERNAL_ERROR_2);
}
17160
17161
17162
/**
 * The one-byte opcode decoder table, indexed by the opcode byte (0x00..0xff).
 * Group opcodes (0x80-0x83, 0xc0/0xc1, 0xd0-0xd3, 0xf6/0xf7, 0xfe, 0xff) and
 * the 0x0f two-byte escape dispatch further on the ModR/M reg field.
 */
const PFNIEMOP g_apfnOneByteMap[256] =
{
    /* 0x00 */  iemOp_add_Eb_Gb,        iemOp_add_Ev_Gv,        iemOp_add_Gb_Eb,        iemOp_add_Gv_Ev,
    /* 0x04 */  iemOp_add_Al_Ib,        iemOp_add_eAX_Iz,       iemOp_push_ES,          iemOp_pop_ES,
    /* 0x08 */  iemOp_or_Eb_Gb,         iemOp_or_Ev_Gv,         iemOp_or_Gb_Eb,         iemOp_or_Gv_Ev,
    /* 0x0c */  iemOp_or_Al_Ib,         iemOp_or_eAX_Iz,        iemOp_push_CS,          iemOp_2byteEscape,
    /* 0x10 */  iemOp_adc_Eb_Gb,        iemOp_adc_Ev_Gv,        iemOp_adc_Gb_Eb,        iemOp_adc_Gv_Ev,
    /* 0x14 */  iemOp_adc_Al_Ib,        iemOp_adc_eAX_Iz,       iemOp_push_SS,          iemOp_pop_SS,
    /* 0x18 */  iemOp_sbb_Eb_Gb,        iemOp_sbb_Ev_Gv,        iemOp_sbb_Gb_Eb,        iemOp_sbb_Gv_Ev,
    /* 0x1c */  iemOp_sbb_Al_Ib,        iemOp_sbb_eAX_Iz,       iemOp_push_DS,          iemOp_pop_DS,
    /* 0x20 */  iemOp_and_Eb_Gb,        iemOp_and_Ev_Gv,        iemOp_and_Gb_Eb,        iemOp_and_Gv_Ev,
    /* 0x24 */  iemOp_and_Al_Ib,        iemOp_and_eAX_Iz,       iemOp_seg_ES,           iemOp_daa,
    /* 0x28 */  iemOp_sub_Eb_Gb,        iemOp_sub_Ev_Gv,        iemOp_sub_Gb_Eb,        iemOp_sub_Gv_Ev,
    /* 0x2c */  iemOp_sub_Al_Ib,        iemOp_sub_eAX_Iz,       iemOp_seg_CS,           iemOp_das,
    /* 0x30 */  iemOp_xor_Eb_Gb,        iemOp_xor_Ev_Gv,        iemOp_xor_Gb_Eb,        iemOp_xor_Gv_Ev,
    /* 0x34 */  iemOp_xor_Al_Ib,        iemOp_xor_eAX_Iz,       iemOp_seg_SS,           iemOp_aaa,
    /* 0x38 */  iemOp_cmp_Eb_Gb,        iemOp_cmp_Ev_Gv,        iemOp_cmp_Gb_Eb,        iemOp_cmp_Gv_Ev,
    /* 0x3c */  iemOp_cmp_Al_Ib,        iemOp_cmp_eAX_Iz,       iemOp_seg_DS,           iemOp_aas,
    /* 0x40 */  iemOp_inc_eAX,          iemOp_inc_eCX,          iemOp_inc_eDX,          iemOp_inc_eBX,
    /* 0x44 */  iemOp_inc_eSP,          iemOp_inc_eBP,          iemOp_inc_eSI,          iemOp_inc_eDI,
    /* 0x48 */  iemOp_dec_eAX,          iemOp_dec_eCX,          iemOp_dec_eDX,          iemOp_dec_eBX,
    /* 0x4c */  iemOp_dec_eSP,          iemOp_dec_eBP,          iemOp_dec_eSI,          iemOp_dec_eDI,
    /* 0x50 */  iemOp_push_eAX,         iemOp_push_eCX,         iemOp_push_eDX,         iemOp_push_eBX,
    /* 0x54 */  iemOp_push_eSP,         iemOp_push_eBP,         iemOp_push_eSI,         iemOp_push_eDI,
    /* 0x58 */  iemOp_pop_eAX,          iemOp_pop_eCX,          iemOp_pop_eDX,          iemOp_pop_eBX,
    /* 0x5c */  iemOp_pop_eSP,          iemOp_pop_eBP,          iemOp_pop_eSI,          iemOp_pop_eDI,
    /* 0x60 */  iemOp_pusha,            iemOp_popa,             iemOp_bound_Gv_Ma,      iemOp_arpl_Ew_Gw_movsx_Gv_Ev,
    /* 0x64 */  iemOp_seg_FS,           iemOp_seg_GS,           iemOp_op_size,          iemOp_addr_size,
    /* 0x68 */  iemOp_push_Iz,          iemOp_imul_Gv_Ev_Iz,    iemOp_push_Ib,          iemOp_imul_Gv_Ev_Ib,
    /* 0x6c */  iemOp_insb_Yb_DX,       iemOp_inswd_Yv_DX,      iemOp_outsb_Yb_DX,      iemOp_outswd_Yv_DX,
    /* 0x70 */  iemOp_jo_Jb,            iemOp_jno_Jb,           iemOp_jc_Jb,            iemOp_jnc_Jb,
    /* 0x74 */  iemOp_je_Jb,            iemOp_jne_Jb,           iemOp_jbe_Jb,           iemOp_jnbe_Jb,
    /* 0x78 */  iemOp_js_Jb,            iemOp_jns_Jb,           iemOp_jp_Jb,            iemOp_jnp_Jb,
    /* 0x7c */  iemOp_jl_Jb,            iemOp_jnl_Jb,           iemOp_jle_Jb,           iemOp_jnle_Jb,
    /* 0x80 */  iemOp_Grp1_Eb_Ib_80,    iemOp_Grp1_Ev_Iz,       iemOp_Grp1_Eb_Ib_82,    iemOp_Grp1_Ev_Ib,
    /* 0x84 */  iemOp_test_Eb_Gb,       iemOp_test_Ev_Gv,       iemOp_xchg_Eb_Gb,       iemOp_xchg_Ev_Gv,
    /* 0x88 */  iemOp_mov_Eb_Gb,        iemOp_mov_Ev_Gv,        iemOp_mov_Gb_Eb,        iemOp_mov_Gv_Ev,
    /* 0x8c */  iemOp_mov_Ev_Sw,        iemOp_lea_Gv_M,         iemOp_mov_Sw_Ev,        iemOp_Grp1A,
    /* 0x90 */  iemOp_nop,              iemOp_xchg_eCX_eAX,     iemOp_xchg_eDX_eAX,     iemOp_xchg_eBX_eAX,
    /* 0x94 */  iemOp_xchg_eSP_eAX,     iemOp_xchg_eBP_eAX,     iemOp_xchg_eSI_eAX,     iemOp_xchg_eDI_eAX,
    /* 0x98 */  iemOp_cbw,              iemOp_cwd,              iemOp_call_Ap,          iemOp_wait,
    /* 0x9c */  iemOp_pushf_Fv,         iemOp_popf_Fv,          iemOp_sahf,             iemOp_lahf,
    /* 0xa0 */  iemOp_mov_Al_Ob,        iemOp_mov_rAX_Ov,       iemOp_mov_Ob_AL,        iemOp_mov_Ov_rAX,
    /* 0xa4 */  iemOp_movsb_Xb_Yb,      iemOp_movswd_Xv_Yv,     iemOp_cmpsb_Xb_Yb,      iemOp_cmpswd_Xv_Yv,
    /* 0xa8 */  iemOp_test_AL_Ib,       iemOp_test_eAX_Iz,      iemOp_stosb_Yb_AL,      iemOp_stoswd_Yv_eAX,
    /* 0xac */  iemOp_lodsb_AL_Xb,      iemOp_lodswd_eAX_Xv,    iemOp_scasb_AL_Xb,      iemOp_scaswd_eAX_Xv,
    /* 0xb0 */  iemOp_mov_AL_Ib,        iemOp_CL_Ib,            iemOp_DL_Ib,            iemOp_BL_Ib,
    /* 0xb4 */  iemOp_mov_AH_Ib,        iemOp_CH_Ib,            iemOp_DH_Ib,            iemOp_BH_Ib,
    /* 0xb8 */  iemOp_eAX_Iv,           iemOp_eCX_Iv,           iemOp_eDX_Iv,           iemOp_eBX_Iv,
    /* 0xbc */  iemOp_eSP_Iv,           iemOp_eBP_Iv,           iemOp_eSI_Iv,           iemOp_eDI_Iv,
    /* 0xc0 */  iemOp_Grp2_Eb_Ib,       iemOp_Grp2_Ev_Ib,       iemOp_retn_Iw,          iemOp_retn,
    /* 0xc4 */  iemOp_les_Gv_Mp,        iemOp_lds_Gv_Mp,        iemOp_Grp11_Eb_Ib,      iemOp_Grp11_Ev_Iz,
    /* 0xc8 */  iemOp_enter_Iw_Ib,      iemOp_leave,            iemOp_retf_Iw,          iemOp_retf,
    /* 0xcc */  iemOp_int_3,            iemOp_int_Ib,           iemOp_into,             iemOp_iret,
    /* 0xd0 */  iemOp_Grp2_Eb_1,        iemOp_Grp2_Ev_1,        iemOp_Grp2_Eb_CL,       iemOp_Grp2_Ev_CL,
    /* 0xd4 */  iemOp_aam_Ib,           iemOp_aad_Ib,           iemOp_Invalid,          iemOp_xlat,
    /* 0xd8 */  iemOp_EscF0,            iemOp_EscF1,            iemOp_EscF2,            iemOp_EscF3,
    /* 0xdc */  iemOp_EscF4,            iemOp_EscF5,            iemOp_EscF6,            iemOp_EscF7,
    /* 0xe0 */  iemOp_loopne_Jb,        iemOp_loope_Jb,         iemOp_loop_Jb,          iemOp_jecxz_Jb,
    /* 0xe4 */  iemOp_in_AL_Ib,         iemOp_in_eAX_Ib,        iemOp_out_Ib_AL,        iemOp_out_Ib_eAX,
    /* 0xe8 */  iemOp_call_Jv,          iemOp_jmp_Jv,           iemOp_jmp_Ap,           iemOp_jmp_Jb,
    /* 0xec */  iemOp_in_AL_DX,         iemOp_eAX_DX,           iemOp_out_DX_AL,        iemOp_out_DX_eAX,
    /* 0xf0 */  iemOp_lock,             iemOp_Invalid,          iemOp_repne,            iemOp_repe, /** @todo 0xf1 is INT1 / ICEBP. */
    /* 0xf4 */  iemOp_hlt,              iemOp_cmc,              iemOp_Grp3_Eb,          iemOp_Grp3_Ev,
    /* 0xf8 */  iemOp_clc,              iemOp_stc,              iemOp_cli,              iemOp_sti,
    /* 0xfc */  iemOp_cld,              iemOp_std,              iemOp_Grp4,             iemOp_Grp5,
};
17230
17231
17232/** @} */
17233
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette