VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllInstCommonBodyMacros.h@103667

Last change on this file since 103667 was 103642, checked in by vboxsync, 14 months ago

VMM/IEM: Use native emitter for the memory variants in IEMOP_BODY_BINARY_rv_rm when available. bugref:10376

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 11.2 KB

/* $Id: IEMAllInstCommonBodyMacros.h 103642 2024-03-02 01:01:44Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Common Body Macros.
 *
 * This is placed in its own file without anything else in it, so that it can
 * be digested by SimplerParser in IEMAllInstPython.py prior to processing
 * any of the other IEMAllInstruction*.cpp.h files.  For instance,
 * IEMAllInstCommon.cpp.h wouldn't do as it defines several invalid
 * instructions and such that could confuse the parser result.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/**
 * Body for word/dword/qword instructions like ADD, AND, OR, ++ with a register
 * as the destination.
 *
 * @note Used both in OneByte and TwoByte0f.
 * @note An illustrative usage sketch follows the macro definition below.
 */
#define IEMOP_BODY_BINARY_rv_rm(a_bRm, a_fnNormalU16, a_fnNormalU32, a_fnNormalU64, a_fModifiesDstReg, a_f16BitMcFlag, a_EmitterBasename, a_fNativeArchs) \
    /* \
     * If rm is denoting a register, no more instruction bytes. \
     */ \
    if (IEM_IS_MODRM_REG_MODE(a_bRm)) \
    { \
        switch (pVCpu->iem.s.enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(3, 0, a_f16BitMcFlag, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint16_t, u16Src, 1); \
                IEM_MC_FETCH_GREG_U16(u16Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint16_t, u16Dst); \
                    IEM_MC_FETCH_GREG_U16(u16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    /** @todo IEM_MC_LOCAL_EFLAGS(uEFlags); */ \
                    IEM_MC_LOCAL(uint32_t, uEFlags); \
                    IEM_MC_FETCH_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl), u16Dst, u16Src, uEFlags, 16); \
                    IEM_MC_STORE_GREG_U16(IEM_GET_MODRM_REG(pVCpu, a_bRm), u16Dst); \
                    IEM_MC_COMMIT_EFLAGS(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint16_t *, pu16Dst, 0); \
                    IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU16, pu16Dst, u16Src, pEFlags); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(3, 0, IEM_MC_F_MIN_386, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint32_t, u32Src, 1); \
                IEM_MC_FETCH_GREG_U32(u32Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint32_t, u32Dst); \
                    IEM_MC_FETCH_GREG_U32(u32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    /** @todo IEM_MC_LOCAL_EFLAGS(uEFlags); */ \
                    IEM_MC_LOCAL(uint32_t, uEFlags); \
                    IEM_MC_FETCH_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl), u32Dst, u32Src, uEFlags, 32); \
                    IEM_MC_STORE_GREG_U32(IEM_GET_MODRM_REG(pVCpu, a_bRm), u32Dst); \
                    IEM_MC_COMMIT_EFLAGS(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint32_t *, pu32Dst, 0); \
                    IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU32, pu32Dst, u32Src, pEFlags); \
                    if (a_fModifiesDstReg) \
                        IEM_MC_CLEAR_HIGH_GREG_U64(IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(3, 0, IEM_MC_F_64BIT, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint64_t, u64Src, 1); \
                IEM_MC_FETCH_GREG_U64(u64Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint64_t, u64Dst); \
                    IEM_MC_FETCH_GREG_U64(u64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    /** @todo IEM_MC_LOCAL_EFLAGS(uEFlags); */ \
                    IEM_MC_LOCAL(uint32_t, uEFlags); \
                    IEM_MC_FETCH_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl), u64Dst, u64Src, uEFlags, 64); \
                    IEM_MC_STORE_GREG_U64(IEM_GET_MODRM_REG(pVCpu, a_bRm), u64Dst); \
                    IEM_MC_COMMIT_EFLAGS(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint64_t *, pu64Dst, 0); \
                    IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU64, pu64Dst, u64Src, pEFlags); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } \
    else \
    { \
        /* \
         * We're accessing memory. \
         */ \
        switch (pVCpu->iem.s.enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(3, 1, a_f16BitMcFlag, 0); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint16_t, u16Src, 1); \
                IEM_MC_FETCH_MEM_U16(u16Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint16_t, u16Dst); \
                    IEM_MC_FETCH_GREG_U16(u16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    /** @todo IEM_MC_LOCAL_EFLAGS(uEFlags); */ \
                    IEM_MC_LOCAL(uint32_t, uEFlags); \
                    IEM_MC_FETCH_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl), u16Dst, u16Src, uEFlags, 16); \
                    IEM_MC_STORE_GREG_U16(IEM_GET_MODRM_REG(pVCpu, a_bRm), u16Dst); \
                    IEM_MC_COMMIT_EFLAGS(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint16_t *, pu16Dst, 0); \
                    IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU16, pu16Dst, u16Src, pEFlags); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(3, 1, IEM_MC_F_MIN_386, 0); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint32_t, u32Src, 1); \
                IEM_MC_FETCH_MEM_U32(u32Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint32_t, u32Dst); \
                    IEM_MC_FETCH_GREG_U32(u32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    /** @todo IEM_MC_LOCAL_EFLAGS(uEFlags); */ \
                    IEM_MC_LOCAL(uint32_t, uEFlags); \
                    IEM_MC_FETCH_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl), u32Dst, u32Src, uEFlags, 32); \
                    IEM_MC_STORE_GREG_U32(IEM_GET_MODRM_REG(pVCpu, a_bRm), u32Dst); \
                    IEM_MC_COMMIT_EFLAGS(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint32_t *, pu32Dst, 0); \
                    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
                    IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU32, pu32Dst, u32Src, pEFlags); \
                    if (a_fModifiesDstReg) \
                        IEM_MC_CLEAR_HIGH_GREG_U64(IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(3, 1, IEM_MC_F_64BIT, 0); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint64_t, u64Src, 1); \
                IEM_MC_FETCH_MEM_U64(u64Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint64_t, u64Dst); \
                    IEM_MC_FETCH_GREG_U64(u64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    /** @todo IEM_MC_LOCAL_EFLAGS(uEFlags); */ \
                    IEM_MC_LOCAL(uint32_t, uEFlags); \
                    IEM_MC_FETCH_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl), u64Dst, u64Src, uEFlags, 64); \
                    IEM_MC_STORE_GREG_U64(IEM_GET_MODRM_REG(pVCpu, a_bRm), u64Dst); \
                    IEM_MC_COMMIT_EFLAGS(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint64_t *, pu64Dst, 0); \
                    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
                    IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU64, pu64Dst, u64Src, pEFlags); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } \
    (void)0
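
/*
 * Illustrative usage sketch (assumptions, not from this file): roughly how a
 * one-byte opcode body invokes the macro above for ADD with Gv as the
 * destination.  FNIEMOP_DEF, IEMOP_MNEMONIC2, IEM_OPCODE_GET_NEXT_U8 and the
 * iemAImpl_add_u* helpers exist elsewhere in the IEM sources, but the exact
 * argument values below are guesses for illustration; see
 * IEMAllInstOneByte.cpp.h for the real call sites.
 */
#if 0 /* sketch only, never compiled */
FNIEMOP_DEF(iemOp_add_Gv_Ev)
{
    IEMOP_MNEMONIC2(RM, ADD, add, Gv, Ev, DISOPTYPE_HARMLESS, 0);
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_BODY_BINARY_rv_rm(bRm, iemAImpl_add_u16, iemAImpl_add_u32, iemAImpl_add_u64,
                            1 /*a_fModifiesDstReg*/, 0 /*a_f16BitMcFlag*/, add,
                            RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64);
}
#endif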