VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllInstCommonBodyMacros.h@ 103852

Last change on this file since 103852 was 103828, checked in by vboxsync, 9 months ago

VMM/IEM: Implemented simple (wholesale) status flag update skipping for arithmetic operations with native emitter. bugref:10375

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 22.9 KB
Line 
/* $Id: IEMAllInstCommonBodyMacros.h 103828 2024-03-13 14:01:20Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Common Body Macros.
 *
 * This is placed in its own file without anything else in it, so that it can
 * be digested by SimplerParser in IEMAllInstPython.py prior to processing
 * any of the other IEMAllInstruction*.cpp.h files. For instance
 * IEMAllInstCommon.cpp.h wouldn't do as it defines several invalid
 * instructions and such that could confuse the parser result.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */
34
35/**
36 * Special case body for word/dword/qword instruction like SUB and XOR that can
37 * be used to zero a register.
38 *
39 * This can be used both for the rv_rm and rm_rv forms since it's working on the
40 * same register.
41 */
42#define IEMOP_BODY_BINARY_rv_SAME_REG_ZERO(a_bRm) \
43 if ( (a_bRm >> X86_MODRM_REG_SHIFT) == ((a_bRm & X86_MODRM_RM_MASK) | (X86_MOD_REG << X86_MODRM_REG_SHIFT)) \
44 && pVCpu->iem.s.uRexReg == pVCpu->iem.s.uRexB) \
45 { \
46 switch (pVCpu->iem.s.enmEffOpSize) \
47 { \
48 case IEMMODE_16BIT: \
49 IEM_MC_BEGIN(1, 0, 0, 0); \
50 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
51 IEM_MC_STORE_GREG_U16_CONST(IEM_GET_MODRM_RM(pVCpu, a_bRm), 0); \
52 IEM_MC_LOCAL(uint32_t, fEFlags); \
53 IEM_MC_FETCH_EFLAGS(fEFlags); \
54 IEM_MC_AND_LOCAL_U32(fEFlags, ~(uint32_t)X86_EFL_STATUS_BITS); \
55 IEM_MC_OR_LOCAL_U32(fEFlags, X86_EFL_PF | X86_EFL_ZF); \
56 IEM_MC_COMMIT_EFLAGS(fEFlags); \
57 IEM_MC_ADVANCE_RIP_AND_FINISH(); \
58 IEM_MC_END(); \
59 break; \
60 \
61 case IEMMODE_32BIT: \
62 IEM_MC_BEGIN(1, 0, IEM_MC_F_MIN_386, 0); \
63 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
64 IEM_MC_STORE_GREG_U32_CONST(IEM_GET_MODRM_RM(pVCpu, a_bRm), 0); \
65 IEM_MC_LOCAL(uint32_t, fEFlags); \
66 IEM_MC_FETCH_EFLAGS(fEFlags); \
67 IEM_MC_AND_LOCAL_U32(fEFlags, ~(uint32_t)X86_EFL_STATUS_BITS); \
68 IEM_MC_OR_LOCAL_U32(fEFlags, X86_EFL_PF | X86_EFL_ZF); \
69 IEM_MC_COMMIT_EFLAGS(fEFlags); \
70 IEM_MC_ADVANCE_RIP_AND_FINISH(); \
71 IEM_MC_END(); \
72 break; \
73 \
74 case IEMMODE_64BIT: \
75 IEM_MC_BEGIN(1, 0, IEM_MC_F_64BIT, 0); \
76 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
77 IEM_MC_STORE_GREG_U64_CONST(IEM_GET_MODRM_RM(pVCpu, a_bRm), 0); \
78 IEM_MC_LOCAL(uint32_t, fEFlags); \
79 IEM_MC_FETCH_EFLAGS(fEFlags); \
80 IEM_MC_AND_LOCAL_U32(fEFlags, ~(uint32_t)X86_EFL_STATUS_BITS); \
81 IEM_MC_OR_LOCAL_U32(fEFlags, X86_EFL_PF | X86_EFL_ZF); \
82 IEM_MC_COMMIT_EFLAGS(fEFlags); \
83 IEM_MC_ADVANCE_RIP_AND_FINISH(); \
84 IEM_MC_END(); \
85 break; \
86 \
87 IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
88 } \
89 } ((void)0)
90
91/**
92 * Body for word/dword/qword instructions like ADD, AND, OR, ++ with a register
93 * as the destination.
94 *
95 * @note Used both in OneByte and TwoByte0f.
96 */
97#define IEMOP_BODY_BINARY_rv_rm(a_bRm, a_fnNormalU16, a_fnNormalU32, a_fnNormalU64, a_f16BitMcFlag, a_EmitterBasename, a_fNativeArchs) \
98 /* \
99 * If rm is denoting a register, no more instruction bytes. \
100 */ \
101 if (IEM_IS_MODRM_REG_MODE(a_bRm)) \
102 { \
103 switch (pVCpu->iem.s.enmEffOpSize) \
104 { \
105 case IEMMODE_16BIT: \
106 IEM_MC_BEGIN(3, 0, a_f16BitMcFlag, 0); \
107 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
108 IEM_MC_ARG(uint16_t, u16Src, 1); \
109 IEM_MC_FETCH_GREG_U16(u16Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
110 IEM_MC_NATIVE_IF(a_fNativeArchs) { \
111 IEM_MC_LOCAL(uint16_t, u16Dst); \
112 IEM_MC_FETCH_GREG_U16(u16Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
113 /** @todo IEM_MC_LOCAL_EFLAGS(uEFlags); */ \
114 IEM_MC_LOCAL(uint32_t, uEFlags); \
115 IEM_MC_FETCH_EFLAGS(uEFlags); \
116 IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl), u16Dst, u16Src, uEFlags, 16); \
117 IEM_MC_STORE_GREG_U16(IEM_GET_MODRM_REG(pVCpu, bRm), u16Dst); \
118 IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
119 } IEM_MC_NATIVE_ELSE() { \
120 IEM_MC_ARG(uint16_t *, pu16Dst, 0); \
121 IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
122 IEM_MC_ARG(uint32_t *, pEFlags, 2); \
123 IEM_MC_REF_EFLAGS(pEFlags); \
124 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU16, pu16Dst, u16Src, pEFlags); \
125 } IEM_MC_NATIVE_ENDIF(); \
126 IEM_MC_ADVANCE_RIP_AND_FINISH(); \
127 IEM_MC_END(); \
128 break; \
129 \
130 case IEMMODE_32BIT: \
131 IEM_MC_BEGIN(3, 0, IEM_MC_F_MIN_386, 0); \
132 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
133 IEM_MC_ARG(uint32_t, u32Src, 1); \
134 IEM_MC_FETCH_GREG_U32(u32Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
135 IEM_MC_NATIVE_IF(a_fNativeArchs) { \
136 IEM_MC_LOCAL(uint32_t, u32Dst); \
137 IEM_MC_FETCH_GREG_U32(u32Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
138 /** @todo IEM_MC_LOCAL_EFLAGS(uEFlags); */ \
139 IEM_MC_LOCAL(uint32_t, uEFlags); \
140 IEM_MC_FETCH_EFLAGS(uEFlags); \
141 IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl), u32Dst, u32Src, uEFlags, 32); \
142 IEM_MC_STORE_GREG_U32(IEM_GET_MODRM_REG(pVCpu, bRm), u32Dst); \
143 IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
144 } IEM_MC_NATIVE_ELSE() { \
145 IEM_MC_ARG(uint32_t *, pu32Dst, 0); \
146 IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
147 IEM_MC_ARG(uint32_t *, pEFlags, 2); \
148 IEM_MC_REF_EFLAGS(pEFlags); \
149 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU32, pu32Dst, u32Src, pEFlags); \
150 IEM_MC_CLEAR_HIGH_GREG_U64(IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
151 } IEM_MC_NATIVE_ENDIF(); \
152 IEM_MC_ADVANCE_RIP_AND_FINISH(); \
153 IEM_MC_END(); \
154 break; \
155 \
156 case IEMMODE_64BIT: \
157 IEM_MC_BEGIN(3, 0, IEM_MC_F_64BIT, 0); \
158 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
159 IEM_MC_ARG(uint64_t, u64Src, 1); \
160 IEM_MC_FETCH_GREG_U64(u64Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
161 IEM_MC_NATIVE_IF(a_fNativeArchs) { \
162 IEM_MC_LOCAL(uint64_t, u64Dst); \
163 IEM_MC_FETCH_GREG_U64(u64Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
164 /** @todo IEM_MC_LOCAL_EFLAGS(uEFlags); */ \
165 IEM_MC_LOCAL(uint32_t, uEFlags); \
166 IEM_MC_FETCH_EFLAGS(uEFlags); \
167 IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl), u64Dst, u64Src, uEFlags, 64); \
168 IEM_MC_STORE_GREG_U64(IEM_GET_MODRM_REG(pVCpu, bRm), u64Dst); \
169 IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
170 } IEM_MC_NATIVE_ELSE() { \
171 IEM_MC_ARG(uint64_t *, pu64Dst, 0); \
172 IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
173 IEM_MC_ARG(uint32_t *, pEFlags, 2); \
174 IEM_MC_REF_EFLAGS(pEFlags); \
175 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU64, pu64Dst, u64Src, pEFlags); \
176 } IEM_MC_NATIVE_ENDIF(); \
177 IEM_MC_ADVANCE_RIP_AND_FINISH(); \
178 IEM_MC_END(); \
179 break; \
180 \
181 IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
182 } \
183 } \
184 else \
185 { \
186 /* \
187 * We're accessing memory. \
188 */ \
189 switch (pVCpu->iem.s.enmEffOpSize) \
190 { \
191 case IEMMODE_16BIT: \
192 IEM_MC_BEGIN(3, 1, a_f16BitMcFlag, 0); \
193 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
194 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
195 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
196 IEM_MC_ARG(uint16_t, u16Src, 1); \
197 IEM_MC_FETCH_MEM_U16(u16Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
198 IEM_MC_NATIVE_IF(a_fNativeArchs) { \
199 IEM_MC_LOCAL(uint16_t, u16Dst); \
200 IEM_MC_FETCH_GREG_U16(u16Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
201 /** @todo IEM_MC_LOCAL_EFLAGS(uEFlags); */ \
202 IEM_MC_LOCAL(uint32_t, uEFlags); \
203 IEM_MC_FETCH_EFLAGS(uEFlags); \
204 IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl), u16Dst, u16Src, uEFlags, 16); \
205 IEM_MC_STORE_GREG_U16(IEM_GET_MODRM_REG(pVCpu, bRm), u16Dst); \
206 IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
207 } IEM_MC_NATIVE_ELSE() { \
208 IEM_MC_ARG(uint16_t *, pu16Dst, 0); \
209 IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
210 IEM_MC_ARG(uint32_t *, pEFlags, 2); \
211 IEM_MC_REF_EFLAGS(pEFlags); \
212 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU16, pu16Dst, u16Src, pEFlags); \
213 } IEM_MC_NATIVE_ENDIF(); \
214 IEM_MC_ADVANCE_RIP_AND_FINISH(); \
215 IEM_MC_END(); \
216 break; \
217 \
218 case IEMMODE_32BIT: \
219 IEM_MC_BEGIN(3, 1, IEM_MC_F_MIN_386, 0); \
220 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
221 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
222 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
223 IEM_MC_ARG(uint32_t, u32Src, 1); \
224 IEM_MC_FETCH_MEM_U32(u32Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
225 IEM_MC_NATIVE_IF(a_fNativeArchs) { \
226 IEM_MC_LOCAL(uint32_t, u32Dst); \
227 IEM_MC_FETCH_GREG_U32(u32Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
228 /** @todo IEM_MC_LOCAL_EFLAGS(uEFlags); */ \
229 IEM_MC_LOCAL(uint32_t, uEFlags); \
230 IEM_MC_FETCH_EFLAGS(uEFlags); \
231 IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl), u32Dst, u32Src, uEFlags, 32); \
232 IEM_MC_STORE_GREG_U32(IEM_GET_MODRM_REG(pVCpu, bRm), u32Dst); \
233 IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
234 } IEM_MC_NATIVE_ELSE() { \
235 IEM_MC_ARG(uint32_t *, pu32Dst, 0); \
236 IEM_MC_ARG(uint32_t *, pEFlags, 2); \
237 IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
238 IEM_MC_REF_EFLAGS(pEFlags); \
239 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU32, pu32Dst, u32Src, pEFlags); \
240 IEM_MC_CLEAR_HIGH_GREG_U64(IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
241 } IEM_MC_NATIVE_ENDIF(); \
242 IEM_MC_ADVANCE_RIP_AND_FINISH(); \
243 IEM_MC_END(); \
244 break; \
245 \
246 case IEMMODE_64BIT: \
247 IEM_MC_BEGIN(3, 1, IEM_MC_F_64BIT, 0); \
248 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
249 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
250 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
251 IEM_MC_ARG(uint64_t, u64Src, 1); \
252 IEM_MC_FETCH_MEM_U64(u64Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
253 IEM_MC_NATIVE_IF(a_fNativeArchs) { \
254 IEM_MC_LOCAL(uint64_t, u64Dst); \
255 IEM_MC_FETCH_GREG_U64(u64Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
256 /** @todo IEM_MC_LOCAL_EFLAGS(uEFlags); */ \
257 IEM_MC_LOCAL(uint32_t, uEFlags); \
258 IEM_MC_FETCH_EFLAGS(uEFlags); \
259 IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl), u64Dst, u64Src, uEFlags, 64); \
260 IEM_MC_STORE_GREG_U64(IEM_GET_MODRM_REG(pVCpu, bRm), u64Dst); \
261 IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
262 } IEM_MC_NATIVE_ELSE() { \
263 IEM_MC_ARG(uint64_t *, pu64Dst, 0); \
264 IEM_MC_ARG(uint32_t *, pEFlags, 2); \
265 IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
266 IEM_MC_REF_EFLAGS(pEFlags); \
267 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU64, pu64Dst, u64Src, pEFlags); \
268 } IEM_MC_NATIVE_ENDIF(); \
269 IEM_MC_ADVANCE_RIP_AND_FINISH(); \
270 IEM_MC_END(); \
271 break; \
272 \
273 IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
274 } \
275 } \
276 (void)0
277
278/**
279 * Body for word/dword/qword the instruction CMP, ++ with a register as the
280 * destination.
281 *
282 * @note Used both in OneByte and TwoByte0f.
283 */
284#define IEMOP_BODY_BINARY_rv_rm_RO(a_bRm, a_InsNm, a_fNativeArchs) \
285 /* \
286 * If rm is denoting a register, no more instruction bytes. \
287 */ \
288 if (IEM_IS_MODRM_REG_MODE(a_bRm)) \
289 { \
290 switch (pVCpu->iem.s.enmEffOpSize) \
291 { \
292 case IEMMODE_16BIT: \
293 IEM_MC_BEGIN(3, 0, 0, 0); \
294 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
295 IEM_MC_ARG(uint16_t, u16Src, 1); \
296 IEM_MC_FETCH_GREG_U16(u16Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
297 IEM_MC_NATIVE_IF(a_fNativeArchs) { \
298 IEM_MC_LOCAL(uint16_t, u16Dst); \
299 IEM_MC_FETCH_GREG_U16(u16Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
300 /** @todo IEM_MC_LOCAL_EFLAGS(uEFlags); */ \
301 IEM_MC_LOCAL(uint32_t, uEFlags); \
302 IEM_MC_FETCH_EFLAGS(uEFlags); \
303 IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_r_efl), u16Dst, u16Src, uEFlags, 16); \
304 IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
305 } IEM_MC_NATIVE_ELSE() { \
306 IEM_MC_ARG(uint16_t *, pu16Dst, 0); \
307 IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
308 IEM_MC_ARG(uint32_t *, pEFlags, 2); \
309 IEM_MC_REF_EFLAGS(pEFlags); \
310 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u16), pu16Dst, u16Src, pEFlags); \
311 } IEM_MC_NATIVE_ENDIF(); \
312 IEM_MC_ADVANCE_RIP_AND_FINISH(); \
313 IEM_MC_END(); \
314 break; \
315 \
316 case IEMMODE_32BIT: \
317 IEM_MC_BEGIN(3, 0, IEM_MC_F_MIN_386, 0); \
318 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
319 IEM_MC_ARG(uint32_t, u32Src, 1); \
320 IEM_MC_FETCH_GREG_U32(u32Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
321 IEM_MC_NATIVE_IF(a_fNativeArchs) { \
322 IEM_MC_LOCAL(uint32_t, u32Dst); \
323 IEM_MC_FETCH_GREG_U32(u32Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
324 /** @todo IEM_MC_LOCAL_EFLAGS(uEFlags); */ \
325 IEM_MC_LOCAL(uint32_t, uEFlags); \
326 IEM_MC_FETCH_EFLAGS(uEFlags); \
327 IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_r_efl), u32Dst, u32Src, uEFlags, 32); \
328 IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
329 } IEM_MC_NATIVE_ELSE() { \
330 IEM_MC_ARG(uint32_t *, pu32Dst, 0); \
331 IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
332 IEM_MC_ARG(uint32_t *, pEFlags, 2); \
333 IEM_MC_REF_EFLAGS(pEFlags); \
334 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u32), pu32Dst, u32Src, pEFlags); \
335 } IEM_MC_NATIVE_ENDIF(); \
336 IEM_MC_ADVANCE_RIP_AND_FINISH(); \
337 IEM_MC_END(); \
338 break; \
339 \
340 case IEMMODE_64BIT: \
341 IEM_MC_BEGIN(3, 0, IEM_MC_F_64BIT, 0); \
342 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
343 IEM_MC_ARG(uint64_t, u64Src, 1); \
344 IEM_MC_FETCH_GREG_U64(u64Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
345 IEM_MC_NATIVE_IF(a_fNativeArchs) { \
346 IEM_MC_LOCAL(uint64_t, u64Dst); \
347 IEM_MC_FETCH_GREG_U64(u64Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
348 /** @todo IEM_MC_LOCAL_EFLAGS(uEFlags); */ \
349 IEM_MC_LOCAL(uint32_t, uEFlags); \
350 IEM_MC_FETCH_EFLAGS(uEFlags); \
351 IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_r_efl), u64Dst, u64Src, uEFlags, 64); \
352 IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
353 } IEM_MC_NATIVE_ELSE() { \
354 IEM_MC_ARG(uint64_t *, pu64Dst, 0); \
355 IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
356 IEM_MC_ARG(uint32_t *, pEFlags, 2); \
357 IEM_MC_REF_EFLAGS(pEFlags); \
358 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u64), pu64Dst, u64Src, pEFlags); \
359 } IEM_MC_NATIVE_ENDIF(); \
360 IEM_MC_ADVANCE_RIP_AND_FINISH(); \
361 IEM_MC_END(); \
362 break; \
363 \
364 IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
365 } \
366 } \
367 else \
368 { \
369 /* \
370 * We're accessing memory. \
371 */ \
372 switch (pVCpu->iem.s.enmEffOpSize) \
373 { \
374 case IEMMODE_16BIT: \
375 IEM_MC_BEGIN(3, 1, 0, 0); \
376 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
377 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
378 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
379 IEM_MC_ARG(uint16_t, u16Src, 1); \
380 IEM_MC_FETCH_MEM_U16(u16Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
381 IEM_MC_NATIVE_IF(a_fNativeArchs) { \
382 IEM_MC_LOCAL(uint16_t, u16Dst); \
383 IEM_MC_FETCH_GREG_U16(u16Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
384 /** @todo IEM_MC_LOCAL_EFLAGS(uEFlags); */ \
385 IEM_MC_LOCAL(uint32_t, uEFlags); \
386 IEM_MC_FETCH_EFLAGS(uEFlags); \
387 IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_r_efl), u16Dst, u16Src, uEFlags, 16); \
388 IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
389 } IEM_MC_NATIVE_ELSE() { \
390 IEM_MC_ARG(uint16_t *, pu16Dst, 0); \
391 IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
392 IEM_MC_ARG(uint32_t *, pEFlags, 2); \
393 IEM_MC_REF_EFLAGS(pEFlags); \
394 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u16), pu16Dst, u16Src, pEFlags); \
395 } IEM_MC_NATIVE_ENDIF(); \
396 IEM_MC_ADVANCE_RIP_AND_FINISH(); \
397 IEM_MC_END(); \
398 break; \
399 \
400 case IEMMODE_32BIT: \
401 IEM_MC_BEGIN(3, 1, IEM_MC_F_MIN_386, 0); \
402 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
403 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
404 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
405 IEM_MC_ARG(uint32_t, u32Src, 1); \
406 IEM_MC_FETCH_MEM_U32(u32Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
407 IEM_MC_NATIVE_IF(a_fNativeArchs) { \
408 IEM_MC_LOCAL(uint32_t, u32Dst); \
409 IEM_MC_FETCH_GREG_U32(u32Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
410 /** @todo IEM_MC_LOCAL_EFLAGS(uEFlags); */ \
411 IEM_MC_LOCAL(uint32_t, uEFlags); \
412 IEM_MC_FETCH_EFLAGS(uEFlags); \
413 IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_r_efl), u32Dst, u32Src, uEFlags, 32); \
414 IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
415 } IEM_MC_NATIVE_ELSE() { \
416 IEM_MC_ARG(uint32_t *, pu32Dst, 0); \
417 IEM_MC_ARG(uint32_t *, pEFlags, 2); \
418 IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
419 IEM_MC_REF_EFLAGS(pEFlags); \
420 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u32), pu32Dst, u32Src, pEFlags); \
421 } IEM_MC_NATIVE_ENDIF(); \
422 IEM_MC_ADVANCE_RIP_AND_FINISH(); \
423 IEM_MC_END(); \
424 break; \
425 \
426 case IEMMODE_64BIT: \
427 IEM_MC_BEGIN(3, 1, IEM_MC_F_64BIT, 0); \
428 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
429 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
430 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
431 IEM_MC_ARG(uint64_t, u64Src, 1); \
432 IEM_MC_FETCH_MEM_U64(u64Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
433 IEM_MC_NATIVE_IF(a_fNativeArchs) { \
434 IEM_MC_LOCAL(uint64_t, u64Dst); \
435 IEM_MC_FETCH_GREG_U64(u64Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
436 /** @todo IEM_MC_LOCAL_EFLAGS(uEFlags); */ \
437 IEM_MC_LOCAL(uint32_t, uEFlags); \
438 IEM_MC_FETCH_EFLAGS(uEFlags); \
439 IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_r_efl), u64Dst, u64Src, uEFlags, 64); \
440 IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
441 } IEM_MC_NATIVE_ELSE() { \
442 IEM_MC_ARG(uint64_t *, pu64Dst, 0); \
443 IEM_MC_ARG(uint32_t *, pEFlags, 2); \
444 IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
445 IEM_MC_REF_EFLAGS(pEFlags); \
446 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u64), pu64Dst, u64Src, pEFlags); \
447 } IEM_MC_NATIVE_ENDIF(); \
448 IEM_MC_ADVANCE_RIP_AND_FINISH(); \
449 IEM_MC_END(); \
450 break; \
451 \
452 IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
453 } \
454 } \
455 (void)0
456
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette