VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllInstCommonBodyMacros.h@ 106212

Last change on this file since 106212 was 106200, checked in by vboxsync, 2 months ago

VMM/IEM: Refactored the xxxxx_r_r_efl functions to take the constant argument (cOpBits) as a template argument. bugref:10720

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 19.8 KB
Line 
1/* $Id: IEMAllInstCommonBodyMacros.h 106200 2024-10-01 23:37:05Z vboxsync $ */
2/** @file
3 * IEM - Instruction Decoding and Emulation, Common Body Macros.
4 *
5 * This is placed in its own file without anything else in it, so that it can
6 * be digested by SimplerParser in IEMAllInstPython.py prior to processing
7 * any of the other IEMAllInstruction*.cpp.h files. For instance,
8 * IEMAllInstCommon.cpp.h wouldn't do as it defines several invalid
9 * instructions and such that could confuse the parser result.
10 */
11
12/*
13 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
14 *
15 * This file is part of VirtualBox base platform packages, as
16 * available from https://www.virtualbox.org.
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation, in version 3 of the
21 * License.
22 *
23 * This program is distributed in the hope that it will be useful, but
24 * WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
26 * General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, see <https://www.gnu.org/licenses>.
30 *
31 * SPDX-License-Identifier: GPL-3.0-only
32 */
33
34
/**
 * Body for word/dword/qword instructions like ADD, AND, OR, ++ with a register
 * as the destination.
 *
 * Expands to a complete decoder body: dispatches on the effective operand size
 * and on whether the ModR/M byte denotes a register or a memory source.  On
 * hosts included in a_fNativeArchs the templated native emitter
 * iemNativeEmit_<a_EmitterBasename>_r_r_efl<cOpBits> is used; otherwise the
 * a_fnNormalUxx worker is called (EFLAGS passed in, updated EFLAGS returned).
 *
 * @param   a_bRm               The ModR/M byte.
 * @param   a_fnNormalU16       Worker for the 16-bit operand size.
 * @param   a_fnNormalU32       Worker for the 32-bit operand size.
 * @param   a_fnNormalU64       Worker for the 64-bit operand size.
 * @param   a_f16BitMcFlag      IEM_MC_BEGIN flags for the 16-bit case.
 * @param   a_EmitterBasename   Base name of the native emitter template.
 * @param   a_fNativeArchs      Host architectures with a native emitter.
 *
 * @note Consistently uses the a_bRm macro parameter rather than relying on the
 *       caller having a local named bRm (expansion is identical for existing
 *       callers, which all pass bRm).
 * @note Used both in OneByte and TwoByte0f.
 */
#define IEMOP_BODY_BINARY_rv_rm(a_bRm, a_fnNormalU16, a_fnNormalU32, a_fnNormalU64, a_f16BitMcFlag, a_EmitterBasename, a_fNativeArchs) \
    /* \
     * If rm is denoting a register, no more instruction bytes. \
     */ \
    if (IEM_IS_MODRM_REG_MODE(a_bRm)) \
    { \
        switch (pVCpu->iem.s.enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(a_f16BitMcFlag, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint16_t, u16Src, 2); \
                IEM_MC_FETCH_GREG_U16(u16Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint16_t, u16Dst); \
                    IEM_MC_FETCH_GREG_U16(u16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl)<16>, u16Dst, u16Src, uEFlags); \
                    IEM_MC_STORE_GREG_U16(IEM_GET_MODRM_REG(pVCpu, a_bRm), u16Dst); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint16_t *,  pu16Dst,   1); \
                    IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG_EFLAGS(      fEFlagsIn, 0); \
                    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, a_fnNormalU16, fEFlagsIn, pu16Dst, u16Src); \
                    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint32_t, u32Src, 2); \
                IEM_MC_FETCH_GREG_U32(u32Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint32_t, u32Dst); \
                    IEM_MC_FETCH_GREG_U32(u32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl)<32>, u32Dst, u32Src, uEFlags); \
                    IEM_MC_STORE_GREG_U32(IEM_GET_MODRM_REG(pVCpu, a_bRm), u32Dst); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint32_t *,  pu32Dst,   1); \
                    IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG_EFLAGS(      fEFlagsIn, 0); \
                    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, a_fnNormalU32, fEFlagsIn, pu32Dst, u32Src); \
                    IEM_MC_CLEAR_HIGH_GREG_U64(IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(IEM_MC_F_64BIT, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint64_t, u64Src, 2); \
                IEM_MC_FETCH_GREG_U64(u64Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint64_t, u64Dst); \
                    IEM_MC_FETCH_GREG_U64(u64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl)<64>, u64Dst, u64Src, uEFlags); \
                    IEM_MC_STORE_GREG_U64(IEM_GET_MODRM_REG(pVCpu, a_bRm), u64Dst); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint64_t *,  pu64Dst,   1); \
                    IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG_EFLAGS(      fEFlagsIn, 0); \
                    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, a_fnNormalU64, fEFlagsIn, pu64Dst, u64Src); \
                    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } \
    else \
    { \
        /* \
         * We're accessing memory. \
         */ \
        switch (pVCpu->iem.s.enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(a_f16BitMcFlag, 0); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint16_t, u16Src, 2); \
                IEM_MC_FETCH_MEM_U16(u16Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint16_t, u16Dst); \
                    IEM_MC_FETCH_GREG_U16(u16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl)<16>, u16Dst, u16Src, uEFlags); \
                    IEM_MC_STORE_GREG_U16(IEM_GET_MODRM_REG(pVCpu, a_bRm), u16Dst); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint16_t *,  pu16Dst,   1); \
                    IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG_EFLAGS(      fEFlagsIn, 0); \
                    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, a_fnNormalU16, fEFlagsIn, pu16Dst, u16Src); \
                    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint32_t, u32Src, 2); \
                IEM_MC_FETCH_MEM_U32(u32Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint32_t, u32Dst); \
                    IEM_MC_FETCH_GREG_U32(u32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl)<32>, u32Dst, u32Src, uEFlags); \
                    IEM_MC_STORE_GREG_U32(IEM_GET_MODRM_REG(pVCpu, a_bRm), u32Dst); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint32_t *,  pu32Dst,   1); \
                    IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG_EFLAGS(      fEFlagsIn, 0); \
                    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, a_fnNormalU32, fEFlagsIn, pu32Dst, u32Src); \
                    IEM_MC_CLEAR_HIGH_GREG_U64(IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(IEM_MC_F_64BIT, 0); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint64_t, u64Src, 2); \
                IEM_MC_FETCH_MEM_U64(u64Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint64_t, u64Dst); \
                    IEM_MC_FETCH_GREG_U64(u64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl)<64>, u64Dst, u64Src, uEFlags); \
                    IEM_MC_STORE_GREG_U64(IEM_GET_MODRM_REG(pVCpu, a_bRm), u64Dst); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint64_t *,  pu64Dst,   1); \
                    IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG_EFLAGS(      fEFlagsIn, 0); \
                    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, a_fnNormalU64, fEFlagsIn, pu64Dst, u64Src); \
                    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } \
    (void)0
209
210
/**
 * Body for word/dword/qword instructions like ADD, AND, OR, ++ with a register
 * as the destination.
 *
 * This is the not-yet-refactored variant (hence the TODO in the name): its
 * fallback path still uses the old calling convention where the worker
 * receives a pointer to EFLAGS (IEM_MC_REF_EFLAGS + IEM_MC_CALL_VOID_AIMPL_3)
 * instead of taking EFLAGS by value and returning the updated value.
 *
 * @param   a_bRm               The ModR/M byte.
 * @param   a_fnNormalU16       Worker for the 16-bit operand size.
 * @param   a_fnNormalU32       Worker for the 32-bit operand size.
 * @param   a_fnNormalU64       Worker for the 64-bit operand size.
 * @param   a_f16BitMcFlag      IEM_MC_BEGIN flags for the 16-bit case.
 * @param   a_EmitterBasename   Base name of the native emitter template.
 * @param   a_fNativeArchs      Host architectures with a native emitter.
 *
 * @note Consistently uses the a_bRm macro parameter rather than relying on the
 *       caller having a local named bRm (expansion is identical for existing
 *       callers, which all pass bRm).
 * @note Used both in OneByte and TwoByte0f.
 */
#define IEMOP_BODY_BINARY_TODO_rv_rm(a_bRm, a_fnNormalU16, a_fnNormalU32, a_fnNormalU64, a_f16BitMcFlag, a_EmitterBasename, a_fNativeArchs) \
    /* \
     * If rm is denoting a register, no more instruction bytes. \
     */ \
    if (IEM_IS_MODRM_REG_MODE(a_bRm)) \
    { \
        switch (pVCpu->iem.s.enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(a_f16BitMcFlag, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint16_t, u16Src, 1); \
                IEM_MC_FETCH_GREG_U16(u16Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint16_t, u16Dst); \
                    IEM_MC_FETCH_GREG_U16(u16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl)<16>, u16Dst, u16Src, uEFlags); \
                    IEM_MC_STORE_GREG_U16(IEM_GET_MODRM_REG(pVCpu, a_bRm), u16Dst); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint16_t *,  pu16Dst, 0); \
                    IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG(uint32_t *,  pEFlags, 2); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU16, pu16Dst, u16Src, pEFlags); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint32_t, u32Src, 1); \
                IEM_MC_FETCH_GREG_U32(u32Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint32_t, u32Dst); \
                    IEM_MC_FETCH_GREG_U32(u32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl)<32>, u32Dst, u32Src, uEFlags); \
                    IEM_MC_STORE_GREG_U32(IEM_GET_MODRM_REG(pVCpu, a_bRm), u32Dst); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint32_t *,  pu32Dst, 0); \
                    IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG(uint32_t *,  pEFlags, 2); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU32, pu32Dst, u32Src, pEFlags); \
                    IEM_MC_CLEAR_HIGH_GREG_U64(IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(IEM_MC_F_64BIT, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint64_t, u64Src, 1); \
                IEM_MC_FETCH_GREG_U64(u64Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint64_t, u64Dst); \
                    IEM_MC_FETCH_GREG_U64(u64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl)<64>, u64Dst, u64Src, uEFlags); \
                    IEM_MC_STORE_GREG_U64(IEM_GET_MODRM_REG(pVCpu, a_bRm), u64Dst); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint64_t *,  pu64Dst, 0); \
                    IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG(uint32_t *,  pEFlags, 2); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU64, pu64Dst, u64Src, pEFlags); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } \
    else \
    { \
        /* \
         * We're accessing memory. \
         */ \
        switch (pVCpu->iem.s.enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(a_f16BitMcFlag, 0); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint16_t, u16Src, 1); \
                IEM_MC_FETCH_MEM_U16(u16Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint16_t, u16Dst); \
                    IEM_MC_FETCH_GREG_U16(u16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl)<16>, u16Dst, u16Src, uEFlags); \
                    IEM_MC_STORE_GREG_U16(IEM_GET_MODRM_REG(pVCpu, a_bRm), u16Dst); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint16_t *,  pu16Dst, 0); \
                    IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG(uint32_t *,  pEFlags, 2); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU16, pu16Dst, u16Src, pEFlags); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint32_t, u32Src, 1); \
                IEM_MC_FETCH_MEM_U32(u32Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint32_t, u32Dst); \
                    IEM_MC_FETCH_GREG_U32(u32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl)<32>, u32Dst, u32Src, uEFlags); \
                    IEM_MC_STORE_GREG_U32(IEM_GET_MODRM_REG(pVCpu, a_bRm), u32Dst); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint32_t *,  pu32Dst, 0); \
                    IEM_MC_ARG(uint32_t *,  pEFlags, 2); \
                    IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU32, pu32Dst, u32Src, pEFlags); \
                    IEM_MC_CLEAR_HIGH_GREG_U64(IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(IEM_MC_F_64BIT, 0); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint64_t, u64Src, 1); \
                IEM_MC_FETCH_MEM_U64(u64Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint64_t, u64Dst); \
                    IEM_MC_FETCH_GREG_U64(u64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl)<64>, u64Dst, u64Src, uEFlags); \
                    IEM_MC_STORE_GREG_U64(IEM_GET_MODRM_REG(pVCpu, a_bRm), u64Dst); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint64_t *,  pu64Dst, 0); \
                    IEM_MC_ARG(uint32_t *,  pEFlags, 2); \
                    IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU64, pu64Dst, u64Src, pEFlags); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } \
    (void)0
385
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette