VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllThrdFuncs-x86.cpp@108260

Last change on this file since 108260 was 108260, checked in by vboxsync, 5 weeks ago

VMM/IEM: Splitting up IEMInline.h. jiraref:VBP-1531

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 49.8 KB
1/* $Id: IEMAllThrdFuncs-x86.cpp 108260 2025-02-17 15:24:14Z vboxsync $ */
2/** @file
3 * IEM - Instruction Decoding and Emulation, Threaded Functions, x86 target.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#ifndef LOG_GROUP /* defined when included by tstIEMCheckMc.cpp */
33# define LOG_GROUP LOG_GROUP_IEM
34#endif
35#define VMCPU_INCL_CPUM_GST_CTX
36#define IEM_WITH_OPAQUE_DECODER_STATE
37#ifdef IN_RING0
38# define VBOX_VMM_TARGET_X86
39#endif
40#include <VBox/vmm/iem.h>
41#include <VBox/vmm/cpum.h>
42#include <VBox/vmm/pdmapic.h>
43#include <VBox/vmm/pdm.h>
44#include <VBox/vmm/pgm.h>
45#include <VBox/vmm/iom.h>
46#include <VBox/vmm/em.h>
47#include <VBox/vmm/hm.h>
48#include <VBox/vmm/nem.h>
49#include <VBox/vmm/gim.h>
50#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
51# include <VBox/vmm/em.h>
52# include <VBox/vmm/hm_svm.h>
53#endif
54#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
55# include <VBox/vmm/hmvmxinline.h>
56#endif
57#include <VBox/vmm/tm.h>
58#include <VBox/vmm/dbgf.h>
59#include <VBox/vmm/dbgftrace.h>
60#include "IEMInternal.h"
61#include <VBox/vmm/vmcc.h>
62#include <VBox/log.h>
63#include <VBox/err.h>
64#include <VBox/param.h>
65#include <VBox/dis.h>
66#include <VBox/disopcode-x86-amd64.h>
67#include <iprt/asm-math.h>
68#include <iprt/assert.h>
69#include <iprt/string.h>
70#include <iprt/x86.h>
71
72#include "IEMInline.h"
73#include "IEMInline-x86.h"
74#include "IEMInlineMem-x86.h"
75#include "IEMMc.h"
76
77#include "IEMThreadedFunctions.h"
78
79
80/*********************************************************************************************************************************
81* Defined Constants And Macros *
82*********************************************************************************************************************************/
83
84/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
85 * and only used when we're in 16-bit code on a pre-386 CPU. */
86#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16(a_cbInstr, a_rcNormal) \
87 return iemRegAddToIp16AndFinishingNoFlags(pVCpu, a_cbInstr, a_rcNormal)
88
89/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
90 * and used for 16-bit and 32-bit code on 386 and later CPUs. */
91#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32(a_cbInstr, a_rcNormal) \
92 return iemRegAddToEip32AndFinishingNoFlags(pVCpu, a_cbInstr, a_rcNormal)
93
94/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
95 * and only used when we're in 64-bit code. */
96#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64(a_cbInstr, a_rcNormal) \
97 return iemRegAddToRip64AndFinishingNoFlags(pVCpu, a_cbInstr, a_rcNormal)
98
99
100/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
101 * and only used when we're in 16-bit code on a pre-386 CPU and we need to
102 * check and clear flags. */
103#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_cbInstr, a_rcNormal) \
104 return iemRegAddToIp16AndFinishingClearingRF(pVCpu, a_cbInstr, a_rcNormal)
105
106/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
107 * and used for 16-bit and 32-bit code on 386 and later CPUs and we need to
108 * check and clear flags. */
109#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_cbInstr, a_rcNormal) \
110 return iemRegAddToEip32AndFinishingClearingRF(pVCpu, a_cbInstr, a_rcNormal)
111
112/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
113 * and only used when we're in 64-bit code and we need to check and clear
114 * flags. */
115#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_cbInstr, a_rcNormal) \
116 return iemRegAddToRip64AndFinishingClearingRF(pVCpu, a_cbInstr, a_rcNormal)
117
118#undef IEM_MC_ADVANCE_RIP_AND_FINISH
119
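The *_THREADED_* macro variants above take the decoded instruction length as an explicit a_cbInstr parameter rather than reading it from the decoder state, which is opaque in this build (IEM_WITH_OPAQUE_DECODER_STATE is defined near the top of the file). As a purely hypothetical illustration - the function name and constants are invented for this note, not taken from the sources - a threaded function that merely advances RIP past a 2-byte instruction in 64-bit code could look like this:

static VBOXSTRICTRC iemThreadedExample_AdvanceRip64(PVMCPUCC pVCpu)
{
    /* Expands to: return iemRegAddToRip64AndFinishingNoFlags(pVCpu, 2, VINF_SUCCESS); */
    IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64(2 /*cbInstr*/, VINF_SUCCESS);
}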
120
121/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length as extra
122 * parameter, for use in 16-bit code on a pre-386 CPU. */
123#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16(a_i8, a_cbInstr, a_rcNormal) \
124 return iemRegIp16RelativeJumpS8AndFinishNoFlags(pVCpu, a_cbInstr, (a_i8), a_rcNormal)
125
126/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
127 * size as extra parameters, for use in 16-bit and 32-bit code on 386 and
128 * later CPUs. */
129#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
130 return iemRegEip32RelativeJumpS8AndFinishNoFlags(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)
131
132/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
133 * size as extra parameters, for use in flat 32-bit code on 386 and later
134 * CPUs. */
135#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_FLAT(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
136 return iemRegEip32RelativeJumpS8FlatAndFinishNoFlags(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)
137
138/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
139 * size as extra parameters, for use in 64-bit code. */
140#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
141 return iemRegRip64RelativeJumpS8AndFinishNoFlags(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)
142
143/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
144 * size as extra parameters, for use in 64-bit code jumping within a page. */
145#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_INTRAPG(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
146 return iemRegRip64RelativeJumpS8IntraPgAndFinishNoFlags(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)
147
148
149/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length as extra
150 * parameter, for use in 16-bit code on a pre-386 CPU and we need to check and
151 * clear flags. */
152#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i8, a_cbInstr, a_rcNormal) \
153 return iemRegIp16RelativeJumpS8AndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_rcNormal)
154
155/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
156 * size as extra parameters, for use in 16-bit and 32-bit code on 386 and
157 * later CPUs and we need to check and clear flags. */
158#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
159 return iemRegEip32RelativeJumpS8AndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)
160
161/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
162 * size as extra parameters, for use in flat 32-bit code on 386 and later
163 * CPUs and we need to check and clear flags. */
164#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
165 return iemRegEip32RelativeJumpS8FlatAndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)
166
167/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
168 * size as extra parameters, for use in 64-bit code and we need to check and
169 * clear flags. */
170#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
171 return iemRegRip64RelativeJumpS8AndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)
172
173/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
174 * size as extra parameters, for use in 64-bit code jumping within a page and we
175 * need to check and clear flags. */
176#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
177 return iemRegRip64RelativeJumpS8IntraPgAndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)
178
179#undef IEM_MC_REL_JMP_S8_AND_FINISH
180
181
182/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
183 * param, for use in 16-bit code on a pre-386 CPU. */
184#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16(a_i16, a_cbInstr, a_rcNormal) \
185 return iemRegEip32RelativeJumpS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16), a_rcNormal)
186
187/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
188 * param, for use in 16-bit and 32-bit code on 386 and later CPUs. */
189#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32(a_i16, a_cbInstr, a_rcNormal) \
190 return iemRegEip32RelativeJumpS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16), a_rcNormal)
191
192/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
193 * param, for use in flat 32-bit code on 386 and later CPUs. */
194#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_FLAT(a_i16, a_cbInstr, a_rcNormal) \
195 return iemRegEip32RelativeJumpS16FlatAndFinishNoFlags(pVCpu, a_cbInstr, (a_i16), a_rcNormal)
196
197/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
198 * param, for use in 64-bit code. */
199#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr, a_rcNormal) \
200 return iemRegRip64RelativeJumpS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16), a_rcNormal)
201
202/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
203 * param, for use in 64-bit code jumping within a page.
204 * @note No special function for this, there is nothing to save here. */
205#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_INTRAPG(a_i16, a_cbInstr, a_rcNormal) \
206 return iemRegRip64RelativeJumpS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16), a_rcNormal)
207
208
209/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
210 * param, for use in 16-bit code on a pre-386 CPU and we need to check and
211 * clear flags. */
212#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
213 return iemRegEip32RelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16), a_rcNormal)
214
215/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
216 * param, for use in 16-bit and 32-bit code on 386 and later CPUs and we need
217 * to check and clear flags. */
218#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
219 return iemRegEip32RelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16), a_rcNormal)
220
221/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
222 * param, for use in flat 32-bit code on 386 and later CPUs and we need
223 * to check and clear flags. */
224#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
225 return iemRegEip32RelativeJumpS16FlatAndFinishClearingRF(pVCpu, a_cbInstr, (a_i16), a_rcNormal)
226
227/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
228 * param, for use in 64-bit code and we need to check and clear flags. */
229#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
230 return iemRegRip64RelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16), a_rcNormal)
231
232/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
233 * param, for use in 64-bit code jumping within a page and we need to check and
234 * clear flags.
235 * @note No special function for this, there is nothing to save here. */
236#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
237 return iemRegRip64RelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16), a_rcNormal)
238
239#undef IEM_MC_REL_JMP_S16_AND_FINISH
240
241
242/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
243 * an extra parameter - dummy for pre-386 variations not eliminated by the
244 * python script. */
245#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16(a_i32, a_cbInstr, a_rcNormal) \
246 do { RT_NOREF(pVCpu, a_i32, a_cbInstr, a_rcNormal); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)
247
248/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
249 * an extra parameter, for use in 16-bit and 32-bit code on 386+. */
250#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32(a_i32, a_cbInstr, a_rcNormal) \
251 return iemRegEip32RelativeJumpS32AndFinishNoFlags(pVCpu, a_cbInstr, (a_i32), a_rcNormal)
252
253/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
254 * an extra parameter, for use in flat 32-bit code on 386+. */
255#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_FLAT(a_i32, a_cbInstr, a_rcNormal) \
256 return iemRegEip32RelativeJumpS32FlatAndFinishNoFlags(pVCpu, a_cbInstr, (a_i32), a_rcNormal)
257
258/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
259 * an extra parameter, for use in 64-bit code. */
260#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64(a_i32, a_cbInstr, a_rcNormal) \
261 return iemRegRip64RelativeJumpS32AndFinishNoFlags(pVCpu, a_cbInstr, (a_i32), a_rcNormal)
262
263/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
264 * an extra parameter, for use in 64-bit code jumping within a page. */
265#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_INTRAPG(a_i32, a_cbInstr, a_rcNormal) \
266 return iemRegRip64RelativeJumpS32IntraPgAndFinishNoFlags(pVCpu, a_cbInstr, (a_i32), a_rcNormal)
267
268
269/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
270 * an extra parameter - dummy for pre-386 variations not eliminated by the
271 * python script. */
272#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
273 do { RT_NOREF(pVCpu, a_i32, a_cbInstr, a_rcNormal); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)
274
275/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
276 * an extra parameter, for use in 16-bit and 32-bit code on 386+ and we need
277 * to check and clear flags. */
278#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
279 return iemRegEip32RelativeJumpS32AndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), a_rcNormal)
280
281/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
282 * an extra parameter, for use in flat 32-bit code on 386+ and we need
283 * to check and clear flags. */
284#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
285 return iemRegEip32RelativeJumpS32FlatAndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), a_rcNormal)
286
287/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
288 * an extra parameter, for use in 64-bit code and we need to check and clear
289 * flags. */
290#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
291 return iemRegRip64RelativeJumpS32AndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), a_rcNormal)
292
293/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
294 * an extra parameter, for use in 64-bit code jumping within a page and we need
295 * to check and clear flags. */
296#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
297 return iemRegRip64RelativeJumpS32IntraPgAndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), a_rcNormal)
298
299#undef IEM_MC_REL_JMP_S32_AND_FINISH
300
301
302
303/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for pre-386 targets. */
304#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16(a_u16NewIP) \
305 return iemRegRipJumpU16AndFinishNoFlags((pVCpu), (a_u16NewIP))
306
307/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for 386+ targets. */
308#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32(a_u16NewIP) \
309 return iemRegRipJumpU16AndFinishNoFlags((pVCpu), (a_u16NewIP))
310
311/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for use in 64-bit code. */
312#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64(a_u16NewIP) \
313 return iemRegRipJumpU16AndFinishNoFlags((pVCpu), (a_u16NewIP))
314
315/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for pre-386 targets that checks and
316 * clears flags. */
317#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_u16NewIP) \
318 return iemRegRipJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP), 0 /* cbInstr - not used */)
319
320/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for 386+ targets that checks and
321 * clears flags. */
322#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u16NewIP) \
323 return iemRegRipJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP), 0 /* cbInstr - not used */)
324
325/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for use in 64-bit code that checks and
326 * clears flags. */
327#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u16NewIP) \
328 return iemRegRipJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP), 0 /* cbInstr - not used */)
329
330#undef IEM_MC_SET_RIP_U16_AND_FINISH
331
332
333/** Variant of IEM_MC_SET_RIP_U32_AND_FINISH for 386+ targets. */
334#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32(a_u32NewEIP) \
335 return iemRegRipJumpU32AndFinishNoFlags((pVCpu), (a_u32NewEIP))
336
337/** Variant of IEM_MC_SET_RIP_U32_AND_FINISH for use in 64-bit code. */
338#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64(a_u32NewEIP) \
339 return iemRegRipJumpU32AndFinishNoFlags((pVCpu), (a_u32NewEIP))
340
341/** Variant of IEM_MC_SET_RIP_U32_AND_FINISH for 386+ targets that checks and
342 * clears flags. */
343#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u32NewEIP) \
344 return iemRegRipJumpU32AndFinishClearingRF((pVCpu), (a_u32NewEIP), 0 /* cbInstr - not used */)
345
346/** Variant of IEM_MC_SET_RIP_U32_AND_FINISH for use in 64-bit code that checks
347 * and clears flags. */
348#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP) \
349 return iemRegRipJumpU32AndFinishClearingRF((pVCpu), (a_u32NewEIP), 0 /* cbInstr - not used */)
350
351#undef IEM_MC_SET_RIP_U32_AND_FINISH
352
353
354/** Variant of IEM_MC_SET_RIP_U64_AND_FINISH for use in 64-bit code. */
355#define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64(a_u32NewEIP) \
356 return iemRegRipJumpU64AndFinishNoFlags((pVCpu), (a_u32NewEIP))
357
358/** Variant of IEM_MC_SET_RIP_U64_AND_FINISH for use in 64-bit code that checks
359 * and clears flags. */
360#define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP) \
361 return iemRegRipJumpU64AndFinishClearingRF((pVCpu), (a_u32NewEIP), 0 /* cbInstr - not used */)
362
363#undef IEM_MC_SET_RIP_U64_AND_FINISH
364
365
366/** Variant of IEM_MC_REL_CALL_S16_AND_FINISH with instruction length as
367 * param, for use in 16-bit code on a pre-386 CPU. */
368#define IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC16(a_i16, a_cbInstr) \
369 return iemRegRipRelativeCallS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16))
370
371/** Variant of IEM_MC_REL_CALL_S16_AND_FINISH with instruction length as
372 * param, for use in 16-bit and 32-bit code on 386 and later CPUs. */
373#define IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC32(a_i16, a_cbInstr) \
374 return iemRegRipRelativeCallS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16))
375
376/** Variant of IEM_MC_REL_CALL_S16_AND_FINISH with instruction length as
377 * param, for use in 64-bit code. */
378#define IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr) \
379 return iemRegRipRelativeCallS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16))
380
381
382/** Variant of IEM_MC_REL_CALL_S16_AND_FINISH with instruction length as
383 * param, for use in 16-bit code on a pre-386 CPU and we need to check and
384 * clear flags. */
385#define IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i16, a_cbInstr) \
386 return iemRegRipRelativeCallS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16))
387
388/** Variant of IEM_MC_REL_CALL_S16_AND_FINISH with instruction length as
389 * param, for use in 16-bit and 32-bit code on 386 and later CPUs and we need
390 * to check and clear flags. */
391#define IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i16, a_cbInstr) \
392 return iemRegRipRelativeCallS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16))
393
394/** Variant of IEM_MC_REL_CALL_S16_AND_FINISH with instruction length as
395 * param, for use in 64-bit code and we need to check and clear flags. */
396#define IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i16, a_cbInstr) \
397 return iemRegRipRelativeCallS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16))
398
399#undef IEM_MC_REL_CALL_S16_AND_FINISH
400
401
402/** Variant of IEM_MC_REL_CALL_S32_AND_FINISH with instruction length as
403 * an extra parameter - dummy for pre-386 variations not eliminated by the
404 * python script. */
405#define IEM_MC_REL_CALL_S32_AND_FINISH_THREADED_PC16(a_i32, a_cbInstr) \
406 do { RT_NOREF(pVCpu, a_i32, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)
407
408/** Variant of IEM_MC_REL_CALL_S32_AND_FINISH with instruction length as
409 * an extra parameter, for use in 16-bit and 32-bit code on 386+. */
410#define IEM_MC_REL_CALL_S32_AND_FINISH_THREADED_PC32(a_i32, a_cbInstr) \
411 return iemRegEip32RelativeCallS32AndFinishNoFlags(pVCpu, a_cbInstr, (a_i32))
412
413/** Variant of IEM_MC_REL_CALL_S32_AND_FINISH with instruction length as
414 * an extra parameter - dummy for 64-bit variations not eliminated by the python script. */
415#define IEM_MC_REL_CALL_S32_AND_FINISH_THREADED_PC64(a_i32, a_cbInstr) \
416 do { RT_NOREF(pVCpu, a_i32, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)
417
418/** Variant of IEM_MC_REL_CALL_S32_AND_FINISH with instruction length as
419 * an extra parameter - dummy for pre-386 variations not eliminated by the
420 * python script. */
421#define IEM_MC_REL_CALL_S32_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i32, a_cbInstr) \
422 do { RT_NOREF(pVCpu, a_i32, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)
423
424/** Variant of IEM_MC_REL_CALL_S32_AND_FINISH with instruction length as
425 * an extra parameter, for use in 16-bit and 32-bit code on 386+ and we need
426 * to check and clear flags. */
427#define IEM_MC_REL_CALL_S32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i32, a_cbInstr) \
428 return iemRegEip32RelativeCallS32AndFinishClearingRF(pVCpu, a_cbInstr, (a_i32))
429
430/** Variant of IEM_MC_REL_CALL_S32_AND_FINISH with instruction length as
431 * an extra parameter, for use in 64-bit code on 386+ and we need
432 * to check and clear flags - dummy for variations not eliminated by the python script. */
433#define IEM_MC_REL_CALL_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i32, a_cbInstr) \
434 do { RT_NOREF(pVCpu, a_i32, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)
435
436
437#undef IEM_MC_REL_CALL_S32_AND_FINISH
438
439
440/** Variant of IEM_MC_REL_CALL_S64_AND_FINISH with instruction length as
441 * an extra parameter, for use in 32-bit code. */
442#define IEM_MC_REL_CALL_S64_AND_FINISH_THREADED_PC32(a_i64, a_cbInstr) \
443 do { RT_NOREF(pVCpu, a_i64, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)
444
445/** Variant of IEM_MC_REL_CALL_S64_AND_FINISH with instruction length as
446 * an extra parameter, for use in 64-bit code. */
447#define IEM_MC_REL_CALL_S64_AND_FINISH_THREADED_PC64(a_i64, a_cbInstr) \
448 return iemRegRip64RelativeCallS64AndFinishNoFlags(pVCpu, a_cbInstr, (a_i64))
449
450
451/** Variant of IEM_MC_REL_CALL_S64_AND_FINISH with instruction length as
452 * an extra parameter, for use in 16-bit and 32-bit code on 386+ and we need
453 * to check and clear flags - dummy for variations not eliminated by the python script. */
454#define IEM_MC_REL_CALL_S64_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i64, a_cbInstr) \
455 do { RT_NOREF(pVCpu, a_i64, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)
456
457/** Variant of IEM_MC_REL_CALL_S64_AND_FINISH with instruction length as
458 * an extra parameter, for use in 64-bit code and we need to check and clear
459 * flags. */
460#define IEM_MC_REL_CALL_S64_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i64, a_cbInstr) \
461 return iemRegRip64RelativeCallS64AndFinishClearingRF(pVCpu, a_cbInstr, (a_i64))
462
463#undef IEM_MC_REL_CALL_S64_AND_FINISH
464
465
466/** Variant of IEM_MC_IND_CALL_U16_AND_FINISH for pre-386 targets. */
467#define IEM_MC_IND_CALL_U16_AND_FINISH_THREADED_PC16(a_u16NewIP, a_cbInstr) \
468 return iemRegIp16IndirectCallU16AndFinishNoFlags((pVCpu), a_cbInstr, (a_u16NewIP))
469
470/** Variant of IEM_MC_IND_CALL_U16_AND_FINISH for 386+ targets. */
471#define IEM_MC_IND_CALL_U16_AND_FINISH_THREADED_PC32(a_u16NewIP, a_cbInstr) \
472 return iemRegEip32IndirectCallU16AndFinishNoFlags((pVCpu), a_cbInstr, (a_u16NewIP))
473
474/** Variant of IEM_MC_IND_CALL_U16_AND_FINISH for use in 64-bit code. */
475#define IEM_MC_IND_CALL_U16_AND_FINISH_THREADED_PC64(a_u16NewIP, a_cbInstr) \
476 do { RT_NOREF(pVCpu, a_u16NewIP, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)
477
478/** Variant of IEM_MC_IND_CALL_U16_AND_FINISH for pre-386 targets that checks and
479 * clears flags. */
480#define IEM_MC_IND_CALL_U16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_u16NewIP, a_cbInstr) \
481 return iemRegIp16IndirectCallU16AndFinishClearingRF((pVCpu), a_cbInstr, (a_u16NewIP))
482
483/** Variant of IEM_MC_IND_CALL_U16_AND_FINISH for 386+ targets that checks and
484 * clears flags. */
485#define IEM_MC_IND_CALL_U16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u16NewIP, a_cbInstr) \
486 return iemRegEip32IndirectCallU16AndFinishClearingRF((pVCpu), a_cbInstr, (a_u16NewIP))
487
488/** Variant of IEM_MC_IND_CALL_U16_AND_FINISH for use in 64-bit code that checks and
489 * clears flags. */
490#define IEM_MC_IND_CALL_U16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u16NewIP, a_cbInstr) \
491 do { RT_NOREF(pVCpu, a_u16NewIP, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)
492
493#undef IEM_MC_IND_CALL_U16_AND_FINISH
494
495
496/** Variant of IEM_MC_IND_CALL_U32_AND_FINISH for 386+ targets. */
497#define IEM_MC_IND_CALL_U32_AND_FINISH_THREADED_PC32(a_u32NewEIP, a_cbInstr) \
498 return iemRegEip32IndirectCallU32AndFinishNoFlags((pVCpu), a_cbInstr, (a_u32NewEIP))
499
500/** Variant of IEM_MC_IND_CALL_U32_AND_FINISH for use in 64-bit code. */
501#define IEM_MC_IND_CALL_U32_AND_FINISH_THREADED_PC64(a_u32NewEIP, a_cbInstr) \
502 do { RT_NOREF(pVCpu, a_u32NewEIP, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)
503
504/** Variant of IEM_MC_IND_CALL_U32_AND_FINISH for 386+ targets that checks and
505 * clears flags. */
506#define IEM_MC_IND_CALL_U32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u32NewEIP, a_cbInstr) \
507 return iemRegEip32IndirectCallU32AndFinishClearingRF((pVCpu), a_cbInstr, (a_u32NewEIP))
508
509/** Variant of IEM_MC_IND_CALL_U32_AND_FINISH for use in 64-bit code that checks
510 * and clears flags. */
511#define IEM_MC_IND_CALL_U32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP, a_cbInstr) \
512 do { RT_NOREF(pVCpu, a_u32NewEIP, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)
513
514#undef IEM_MC_IND_CALL_U32_AND_FINISH
515
516
517/** Variant of IEM_MC_IND_CALL_U64_AND_FINISH for use in 64-bit code. */
518#define IEM_MC_IND_CALL_U64_AND_FINISH_THREADED_PC64(a_u32NewRIP, a_cbInstr) \
519 return iemRegRip64IndirectCallU64AndFinishNoFlags((pVCpu), a_cbInstr, (a_u32NewRIP))
520
521/** Variant of IEM_MC_IND_CALL_U64_AND_FINISH for use in 64-bit code that checks
522 * and clears flags. */
523#define IEM_MC_IND_CALL_U64_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewRIP, a_cbInstr) \
524 return iemRegRip64IndirectCallU64AndFinishClearingRF((pVCpu), a_cbInstr, (a_u32NewRIP))
525
526#undef IEM_MC_IND_CALL_U64_AND_FINISH
527
528
529/** Variant of IEM_MC_RETN_AND_FINISH for pre-386 targets. */
530#define IEM_MC_RETN_AND_FINISH_THREADED_PC16(a_cbPopArgs, a_cbInstr) \
531 return iemRegRipNearReturnAndFinishNoFlags((pVCpu), a_cbInstr, (a_cbPopArgs), IEMMODE_16BIT)
532
533/** Variant of IEM_MC_RETN_AND_FINISH for 386+ targets. */
534#define IEM_MC_RETN_AND_FINISH_THREADED_PC32(a_cbPopArgs, a_cbInstr, a_enmEffOpSize) \
535 return iemRegRipNearReturnAndFinishNoFlags((pVCpu), a_cbInstr, (a_cbPopArgs), (a_enmEffOpSize))
536
537/** Variant of IEM_MC_RETN_AND_FINISH for use in 64-bit code. */
538#define IEM_MC_RETN_AND_FINISH_THREADED_PC64(a_cbPopArgs, a_cbInstr, a_enmEffOpSize) \
539 return iemRegRipNearReturnAndFinishNoFlags((pVCpu), a_cbInstr, (a_cbPopArgs), (a_enmEffOpSize))
540
541/** Variant of IEM_MC_RETN_AND_FINISH for pre-386 targets that checks and
542 * clears flags. */
543#define IEM_MC_RETN_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_cbPopArgs, a_cbInstr) \
544 return iemRegRipNearReturnAndFinishClearingRF((pVCpu), a_cbInstr, (a_cbPopArgs), IEMMODE_16BIT)
545
546/** Variant of IEM_MC_RETN_AND_FINISH for 386+ targets that checks and
547 * clears flags. */
548#define IEM_MC_RETN_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_cbPopArgs, a_cbInstr, a_enmEffOpSize) \
549 return iemRegRipNearReturnAndFinishClearingRF((pVCpu), a_cbInstr, (a_cbPopArgs), (a_enmEffOpSize))
550
551/** Variant of IEM_MC_RETN_AND_FINISH for use in 64-bit code that checks and
552 * clears flags. */
553#define IEM_MC_RETN_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_cbPopArgs, a_cbInstr, a_enmEffOpSize) \
554 return iemRegRipNearReturnAndFinishClearingRF((pVCpu), a_cbInstr, (a_cbPopArgs), (a_enmEffOpSize))
555
556#undef IEM_MC_RETN_AND_FINISH
557
558
559/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters, 16-bit. */
560#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_16(a_GCPtrEff, a_bRm, a_u16Disp) \
561 (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr16(pVCpu, a_bRm, a_u16Disp)
562
563/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters, 32-bit. */
564#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_32(a_GCPtrEff, a_bRm, a_uSibAndRspOffset, a_u32Disp) \
565 (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr32(pVCpu, a_bRm, a_uSibAndRspOffset, a_u32Disp)
566
567/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
568#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_64(a_GCPtrEff, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm) \
569 (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm)
570
571/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
572#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS(a_GCPtrEff, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm) \
573 (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm)
574
575/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters.
576 * @todo How did that address prefix thing work for 64-bit code again? */
577#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32(a_GCPtrEff, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm) \
578 (a_GCPtrEff) = (uint32_t)iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm)
579
580#undef IEM_MC_CALC_RM_EFF_ADDR
581
582
583/** Variant of IEM_MC_CALL_CIMPL_1 with explicit instruction length parameter. */
584#define IEM_MC_CALL_CIMPL_1_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
585 return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0)
586#undef IEM_MC_CALL_CIMPL_1
587
588/** Variant of IEM_MC_CALL_CIMPL_2 with explicit instruction length parameter. */
589#define IEM_MC_CALL_CIMPL_2_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
590 return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1)
591#undef IEM_MC_CALL_CIMPL_2
592
593/** Variant of IEM_MC_CALL_CIMPL_3 with explicit instruction length parameter. */
594#define IEM_MC_CALL_CIMPL_3_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
595 return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2)
596#undef IEM_MC_CALL_CIMPL_3
597
598/** Variant of IEM_MC_CALL_CIMPL_4 with explicit instruction length parameter. */
599#define IEM_MC_CALL_CIMPL_4_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3) \
600 return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2, a3)
601#undef IEM_MC_CALL_CIMPL_4
602
603/** Variant of IEM_MC_CALL_CIMPL_5 with explicit instruction length parameter. */
604#define IEM_MC_CALL_CIMPL_5_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3, a4) \
605 return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2, a3, a4)
606#undef IEM_MC_CALL_CIMPL_5
607
608
609/** Variant of IEM_MC_DEFER_TO_CIMPL_0_RET with explicit instruction
610 * length parameter. */
611#define IEM_MC_DEFER_TO_CIMPL_0_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl) \
612 return (a_pfnCImpl)(pVCpu, (a_cbInstr))
613#undef IEM_MC_DEFER_TO_CIMPL_0_RET
614
615/** Variant of IEM_MC_DEFER_TO_CIMPL_1_RET with explicit instruction
616 * length parameter. */
617#define IEM_MC_DEFER_TO_CIMPL_1_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
618 return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0)
619#undef IEM_MC_DEFER_TO_CIMPL_1_RET
620
621/** Variant of IEM_MC_DEFER_TO_CIMPL_2_RET with explicit instruction length parameter. */
622#define IEM_MC_DEFER_TO_CIMPL_2_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
623 return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1)
624#undef IEM_MC_DEFER_TO_CIMPL_2_RET
625
626/** Variant of IEM_MC_DEFER_TO_CIMPL_3 with explicit instruction length
627 * parameter. */
628#define IEM_MC_DEFER_TO_CIMPL_3_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
629 return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2)
630#undef IEM_MC_DEFER_TO_CIMPL_3_RET
631
632/** Variant of IEM_MC_DEFER_TO_CIMPL_4 with explicit instruction length
633 * parameter. */
634#define IEM_MC_DEFER_TO_CIMPL_4_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3) \
635 return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2, a3)
636#undef IEM_MC_DEFER_TO_CIMPL_4_RET
637
638/** Variant of IEM_MC_DEFER_TO_CIMPL_5 with explicit instruction length
639 * parameter. */
640#define IEM_MC_DEFER_TO_CIMPL_5_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3, a4) \
641 return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2, a3, a4)
642#undef IEM_MC_DEFER_TO_CIMPL_5_RET
643
644
645/** Variant of IEM_MC_FETCH_GREG_U8 with extended (20) register index. */
646#define IEM_MC_FETCH_GREG_U8_THREADED(a_u8Dst, a_iGRegEx) \
647 (a_u8Dst) = iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))
648
649/** Variant of IEM_MC_FETCH_GREG_U8_ZX_U16 with extended (20) register index. */
650#define IEM_MC_FETCH_GREG_U8_ZX_U16_THREADED(a_u16Dst, a_iGRegEx) \
651 (a_u16Dst) = iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))
652
653/** Variant of IEM_MC_FETCH_GREG_U8_ZX_U32 with extended (20) register index. */
654#define IEM_MC_FETCH_GREG_U8_ZX_U32_THREADED(a_u32Dst, a_iGRegEx) \
655 (a_u32Dst) = iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))
656
657/** Variant of IEM_MC_FETCH_GREG_U8_ZX_U64 with extended (20) register index. */
658#define IEM_MC_FETCH_GREG_U8_ZX_U64_THREADED(a_u64Dst, a_iGRegEx) \
659 (a_u64Dst) = iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))
660
661/** Variant of IEM_MC_FETCH_GREG_U8_SX_U16 with extended (20) register index. */
662#define IEM_MC_FETCH_GREG_U8_SX_U16_THREADED(a_u16Dst, a_iGRegEx) \
663 (a_u16Dst) = (int8_t)iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))
664
665/** Variant of IEM_MC_FETCH_GREG_U8_SX_U32 with extended (20) register index. */
666#define IEM_MC_FETCH_GREG_U8_SX_U32_THREADED(a_u32Dst, a_iGRegEx) \
667 (a_u32Dst) = (int8_t)iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))
668#undef IEM_MC_FETCH_GREG_U8_SX_U32
669
670/** Variant of IEM_MC_FETCH_GREG_U8_SX_U64 with extended (20) register index. */
671#define IEM_MC_FETCH_GREG_U8_SX_U64_THREADED(a_u64Dst, a_iGRegEx) \
672 (a_u64Dst) = (int8_t)iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))
673#undef IEM_MC_FETCH_GREG_U8_SX_U64
674
675/** Variant of IEM_MC_STORE_GREG_U8 with extended (20) register index. */
676#define IEM_MC_STORE_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
677 *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) = (a_u8Value)
678#undef IEM_MC_STORE_GREG_U8
679
680/** Variant of IEM_MC_STORE_GREG_U8_CONST with extended (20) register index. */
681#define IEM_MC_STORE_GREG_U8_CONST_THREADED(a_iGRegEx, a_u8Value) \
682 *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) = (a_u8Value)
683#undef IEM_MC_STORE_GREG_U8
684
685/** Variant of IEM_MC_REF_GREG_U8 with extended (20) register index. */
686#define IEM_MC_REF_GREG_U8_THREADED(a_pu8Dst, a_iGRegEx) \
687 (a_pu8Dst) = iemGRegRefU8Ex(pVCpu, (a_iGRegEx))
688#undef IEM_MC_REF_GREG_U8
689
690/** Variant of IEM_MC_REF_GREG_U8_CONST with extended (20) register index. */
691#define IEM_MC_REF_GREG_U8_CONST_THREADED(a_pu8Dst, a_iGRegEx) \
692 (a_pu8Dst) = iemGRegRefU8Ex(pVCpu, (a_iGRegEx))
693#undef IEM_MC_REF_GREG_U8
694
695/** Variant of IEM_MC_ADD_GREG_U8_TO_LOCAL with extended (20) register index. */
696#define IEM_MC_ADD_GREG_U8_TO_LOCAL_THREADED(a_u8Value, a_iGRegEx) \
697 do { (a_u8Value) += iemGRegFetchU8Ex(pVCpu, (a_iGRegEx)); } while (0)
698#undef IEM_MC_ADD_GREG_U8_TO_LOCAL
699
700/** Variant of IEM_MC_AND_GREG_U8 with extended (20) register index. */
701#define IEM_MC_AND_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
702 *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) &= (a_u8Value)
703#undef IEM_MC_AND_GREG_U8
704
705/** Variant of IEM_MC_OR_GREG_U8 with extended (20) register index. */
706#define IEM_MC_OR_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
707 *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) |= (a_u8Value)
708#undef IEM_MC_OR_GREG_U8
709
710
711/** For asserting that only declared output flags changed. */
712#ifndef VBOX_STRICT
713# define IEM_MC_ASSERT_EFLAGS(a_fEflInput, a_fEflOutput) ((void)0)
714#else
715# undef IEM_MC_REF_EFLAGS_EX
716# define IEM_MC_REF_EFLAGS_EX(a_pEFlags, a_fEflInput, a_fEflOutput) \
717 uint32_t const fEflAssert = pVCpu->cpum.GstCtx.eflags.uBoth; \
718 IEM_MC_REF_EFLAGS(a_pEFlags)
719# define IEM_MC_ASSERT_EFLAGS(a_fEflInput, a_fEflOutput) \
720 AssertMsg((pVCpu->cpum.GstCtx.eflags.uBoth & ~(a_fEflOutput)) == (fEflAssert & ~(a_fEflOutput)), \
721 ("now %#x (%#x), was %#x (%#x) - diff %#x; a_fEflOutput=%#x\n", \
722 (pVCpu->cpum.GstCtx.eflags.uBoth & ~(a_fEflOutput)), pVCpu->cpum.GstCtx.eflags.uBoth, \
723 (fEflAssert & ~(a_fEflOutput)), fEflAssert, \
724 (pVCpu->cpum.GstCtx.eflags.uBoth ^ fEflAssert) & ~(a_fEflOutput), a_fEflOutput))
725#endif
726
727
728
729/**
730 * Calculates the effective address of a ModR/M memory operand, 16-bit
731 * addressing variant.
732 *
733 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR16.
734 *
735 * @returns The effective address.
736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
737 * @param bRm The ModRM byte.
738 * @param u16Disp The displacement byte/word, if any.
739 *
740 */
741static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr16(PVMCPUCC pVCpu, uint8_t bRm, uint16_t u16Disp) RT_NOEXCEPT
742{
743 Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: bRm=%#x u16Disp=%#x\n", bRm, u16Disp));
744 Assert(!IEM_IS_64BIT_CODE(pVCpu));
745
746 /* Handle the disp16 form with no registers first. */
747 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
748 {
749 Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: EffAddr=%#010RGv\n", (RTGCPTR)u16Disp));
750 return u16Disp;
751 }
752
753 /* Get the displacement. */
754 /** @todo we can eliminate this step by making u16Disp have this value
755 * already! */
756 uint16_t u16EffAddr;
757 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
758 {
759 case 0: u16EffAddr = 0; break;
760 case 1: u16EffAddr = (int16_t)(int8_t)u16Disp; break;
761 case 2: u16EffAddr = u16Disp; break;
762 default: AssertFailedStmt(u16EffAddr = 0);
763 }
764
765 /* Add the base and index registers to the disp. */
766 switch (bRm & X86_MODRM_RM_MASK)
767 {
768 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
769 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
770 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; break;
771 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; break;
772 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
773 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
774 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; break;
775 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
776 }
777
778 Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: EffAddr=%#010RGv\n", (RTGCPTR)u16EffAddr));
779 return u16EffAddr;
780}
781
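To make the mod/rm tables above concrete, here is a minimal standalone sketch of the mod=1, rm=2 case (the bp+si+disp8 form) with hypothetical register values; it illustrates the same arithmetic and is not code from the file:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical guest state; encoding bRm=0x42 -> mod=01, reg=000, rm=010. */
    uint16_t const uBp = 0x2000, uSi = 0x0030;
    uint16_t const u16Disp = 0x10;                       /* the raw disp8 byte as passed in */
    uint16_t uEff = (uint16_t)(int16_t)(int8_t)u16Disp;  /* mod=1: sign-extend the disp8 */
    uEff += uBp + uSi;                                   /* rm=2: bp + si */
    printf("EffAddr=%#06x\n", (unsigned)uEff);           /* prints EffAddr=0x2040 */
    return 0;
}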
782
783/**
784 * Calculates the effective address of a ModR/M memory operand, 32-bit
785 * addressing variant.
786 *
787 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_32 defined in
788 * the macro section above.
789 *
790 * @returns The effective address.
791 * @param pVCpu The cross context virtual CPU structure of the
792 * calling thread.
793 * @param bRm The ModRM byte.
794 * @param uSibAndRspOffset Two parts:
795 * - The first 8 bits make up the SIB byte.
796 * - The next 8 bits are the fixed RSP/ESP offset
797 * in case of a pop [xSP].
798 * @param u32Disp The displacement byte/dword, if any.
799 */
800static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr32(PVMCPUCC pVCpu, uint8_t bRm, uint32_t uSibAndRspOffset,
801 uint32_t u32Disp) RT_NOEXCEPT
802{
803 Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: bRm=%#x uSibAndRspOffset=%#x u32Disp=%#x\n", bRm, uSibAndRspOffset, u32Disp));
804
805 /* Handle the disp32 form with no registers first. */
806 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
807 {
808 Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: EffAddr=%#010RGv\n", (RTGCPTR)u32Disp));
809 return u32Disp;
810 }
811
812 /* Get the register (or SIB) value. */
813 uint32_t u32EffAddr;
814#ifdef _MSC_VER
815 u32EffAddr = 0;/* MSC uninitialized variable analysis is too simple, it seems. */
816#endif
817 switch (bRm & X86_MODRM_RM_MASK)
818 {
819 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
820 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
821 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
822 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
823 case 4: /* SIB */
824 {
825 /* Get the index and scale it. */
826 switch ((uSibAndRspOffset >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
827 {
828 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
829 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
830 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
831 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
832 case 4: u32EffAddr = 0; /*none */ break;
833 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
834 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
835 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
836 }
837 u32EffAddr <<= (uSibAndRspOffset >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
838
839 /* add base */
840 switch (uSibAndRspOffset & X86_SIB_BASE_MASK)
841 {
842 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
843 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
844 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
845 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
846 case 4:
847 u32EffAddr += pVCpu->cpum.GstCtx.esp;
848 u32EffAddr += uSibAndRspOffset >> 8;
849 break;
850 case 5:
851 if ((bRm & X86_MODRM_MOD_MASK) != 0)
852 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
853 else
854 u32EffAddr += u32Disp;
855 break;
856 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
857 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
858 }
859 break;
860 }
861 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
862 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
863 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
864 }
865
866 /* Get and add the displacement. */
867 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
868 {
869 case 0: break;
870 case 1: u32EffAddr += (int8_t)u32Disp; break;
871 case 2: u32EffAddr += u32Disp; break;
872 default: AssertFailed();
873 }
874
875 Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: EffAddr=%#010RGv\n", (RTGCPTR)u32EffAddr));
876 return u32EffAddr;
877}
878
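For an idea of how the packed uSibAndRspOffset parameter is consumed above, consider (hypothetical values) bRm = 0x04 (mod=0, rm=4, so a SIB byte follows) with SIB = 0x4B, i.e. scale=1, index=ecx, base=ebx; the helper then yields ebx + ecx*2. A standalone sketch of just that SIB arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical guest registers and encoding. */
    uint32_t const uEcx = 0x10, uEbx = 0x1000;
    uint32_t const uSibAndRspOffset = 0x4B;     /* SIB byte in bits 0-7, RSP/ESP pop offset in bits 8+ (0 here) */
    uint32_t uEff = uEcx;                       /* index field 001 -> ecx */
    uEff <<= (uSibAndRspOffset >> 6) & 3;       /* scale field 01 -> shift by 1 (times 2) */
    uEff += uEbx;                               /* base field 011 -> ebx */
    printf("EffAddr=%#010x\n", uEff);           /* prints EffAddr=0x00001020 */
    return 0;
}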
879
880/**
881 * Calculates the effective address of a ModR/M memory operand.
882 *
883 * Meant to be used via the IEM_MC_CALC_RM_EFF_ADDR_THREADED_64* macros.
884 *
885 * @returns The effective address.
886 * @param pVCpu The cross context virtual CPU structure of the
887 * calling thread.
888 * @param bRmEx The ModRM byte but with bit 3 set to REX.B and
889 * bit 4 to REX.X. The two bits are part of the
890 * REG sub-field, which isn't needed in this
891 * function.
892 * @param uSibAndRspOffset Two parts:
893 * - The first 8 bits make up the SIB byte.
894 * - The next 8 bits are the fixed RSP/ESP offset
895 * in case of a pop [xSP].
896 * @param u32Disp The displacement byte/word/dword, if any.
897 * @param cbInstr The size of the fully decoded instruction. Used
898 * for RIP relative addressing.
899 * @todo combine cbInstr and cbImm!
900 */
901static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr64(PVMCPUCC pVCpu, uint8_t bRmEx, uint32_t uSibAndRspOffset,
902 uint32_t u32Disp, uint8_t cbInstr) RT_NOEXCEPT
903{
904 Log5(("iemOpHlpCalcRmEffAddrThreadedAddr64: bRmEx=%#x\n", bRmEx));
905 Assert(IEM_IS_64BIT_CODE(pVCpu));
906
907 uint64_t u64EffAddr;
908
909 /* Handle the rip+disp32 form with no registers first. */
910 if ((bRmEx & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
911 {
912 u64EffAddr = (int32_t)u32Disp;
913 u64EffAddr += pVCpu->cpum.GstCtx.rip + cbInstr;
914 }
915 else
916 {
917 /* Get the register (or SIB) value. */
918#ifdef _MSC_VER
919 u64EffAddr = 0; /* MSC uninitialized variable analysis is too simple, it seems. */
920#endif
921 switch (bRmEx & (X86_MODRM_RM_MASK | 0x8)) /* bRmEx[bit 3] = REX.B */
922 {
923 default:
924 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
925 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
926 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
927 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
928 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
929 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
930 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
931 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
932 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
933 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
934 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
935 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
936 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
937 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
938 /* SIB */
939 case 4:
940 case 12:
941 {
942 /* Get the index and scale it. */
943 switch ( ((uSibAndRspOffset >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
944 | ((bRmEx & 0x10) >> 1)) /* bRmEx[bit 4] = REX.X */
945 {
946 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
947 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
948 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
949 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
950 case 4: u64EffAddr = 0; /*none */ break;
951 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
952 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
953 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
954 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
955 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
956 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
957 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
958 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
959 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
960 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
961 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
962 }
963 u64EffAddr <<= (uSibAndRspOffset >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
964
965 /* add base */
966 switch ((uSibAndRspOffset & X86_SIB_BASE_MASK) | (bRmEx & 0x8)) /* bRmEx[bit 3] = REX.B */
967 {
968 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
969 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
970 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
971 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
972 case 4:
973 u64EffAddr += pVCpu->cpum.GstCtx.rsp;
974 u64EffAddr += uSibAndRspOffset >> 8;
975 break;
976 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
977 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
978 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
979 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
980 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
981 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
982 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
983 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
984 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
985 /* complicated encodings */
986 case 5:
987 if ((bRmEx & X86_MODRM_MOD_MASK) != 0)
988 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
989 else
990 u64EffAddr += (int32_t)u32Disp;
991 break;
992 case 13:
993 if ((bRmEx & X86_MODRM_MOD_MASK) != 0)
994 u64EffAddr += pVCpu->cpum.GstCtx.r13;
995 else
996 u64EffAddr += (int32_t)u32Disp;
997 break;
998 }
999 break;
1000 }
1001 }
1002
1003 /* Get and add the displacement. */
1004 switch ((bRmEx >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
1005 {
1006 case 0: break;
1007 case 1: u64EffAddr += (int8_t)u32Disp; break;
1008 case 2: u64EffAddr += (int32_t)u32Disp; break;
1009 default: AssertFailed();
1010 }
1011 }
1012
1013 Log5(("iemOpHlpCalcRmEffAddrThreadedAddr64: EffAddr=%#010RGv\n", u64EffAddr));
1014 return u64EffAddr;
1015}
1016
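As a concrete reading of the bRmEx packing documented above (hypothetical values): with REX.B=1, mod=01 and rm=101 the caller passes bRmEx = 0x4D, the register switch therefore hits case 13 (r13), and since mod=1 a sign-extended disp8 is added. A standalone sketch of that path:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical guest state; bRmEx = mod(01), REX.X(0), REX.B(1), rm(101) = 0x4D. */
    uint64_t const uR13    = UINT64_C(0x00007fff12340000);
    uint8_t  const bRmEx   = 0x4D;
    uint32_t const u32Disp = 0x10;                  /* the raw disp8 byte as passed in */
    uint64_t uEff = uR13;                           /* (bRmEx & 0xf) == 13 -> r13 */
    switch ((bRmEx >> 6) & 3)                       /* the mod field selects the displacement size */
    {
        case 1: uEff += (int8_t)u32Disp;  break;    /* disp8, sign-extended */
        case 2: uEff += (int32_t)u32Disp; break;    /* disp32, sign-extended */
        default: break;
    }
    printf("EffAddr=%#018llx\n", (unsigned long long)uEff); /* prints EffAddr=0x00007fff12340010 */
    return 0;
}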
1017
1018/*
1019 * The threaded functions.
1020 */
1021#include "IEMThreadedFunctions.cpp.h"
1022