VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMMc.h @ 104882

Last change on this file since 104882 was 104419, checked in by vboxsync, 7 months ago

VMM/IEM: Convert near return (retn) and relative/indirect call instructions to special IEM MC statements in order to be able to recompile them, bugref:10376

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 173.0 KB
1/* $Id: IEMMc.h 104419 2024-04-24 14:32:29Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - IEM_MC_XXX.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMMc_h
29#define VMM_INCLUDED_SRC_include_IEMMc_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34
35/** @name "Microcode" macros.
36 *
37 * The idea is that we should be able to use the same code to interpret
38 * instructions as well as to recompile them. Thus this obfuscation.
39 *
40 * @{
41 */
42
43#define IEM_MC_BEGIN(a_fMcFlags, a_fCImplFlags) {
44#define IEM_MC_END() }
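/* A minimal, illustrative sketch (simplified; the register indices and the flag
   arguments are placeholders) of how the instruction bodies in the
   IEMAllInst*.cpp.h templates compose these statements for a register-to-register
   16-bit ADD:

        IEM_MC_BEGIN(0, 0);
        IEM_MC_ARG(uint16_t *,  pu16Dst, 0);
        IEM_MC_ARG(uint16_t,    u16Src,  1);
        IEM_MC_ARG(uint32_t *,  pEFlags, 2);
        IEM_MC_FETCH_GREG_U16(u16Src, X86_GREG_xCX);
        IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
        IEM_MC_ADVANCE_RIP_AND_FINISH();
        IEM_MC_END();

   When interpreting, such a block is plain C; for recompilation the same block
   is parsed as a statement list and translated instead of being executed. */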
45
46/** Internal macro. */
47#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
48 do \
49 { \
50 VBOXSTRICTRC rcStrict2 = a_Expr; \
51 if (rcStrict2 == VINF_SUCCESS) \
52 { /* likely */ } \
53 else \
54 return rcStrict2; \
55 } while (0)
56
57
58/** Dummy MC that prevents native recompilation. */
59#define IEM_MC_NO_NATIVE_RECOMPILE() ((void)0)
60
61/** Advances RIP, finishes the instruction and returns.
62 * This may include raising debug exceptions and such. */
63#define IEM_MC_ADVANCE_RIP_AND_FINISH() return iemRegAddToRipAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
64/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
65#define IEM_MC_REL_JMP_S8_AND_FINISH(a_i8) \
66 return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i8), pVCpu->iem.s.enmEffOpSize)
67/** Sets RIP (may trigger \#GP), finishes the instruction and returns.
68 * @note only usable in 16-bit op size mode. */
69#define IEM_MC_REL_JMP_S16_AND_FINISH(a_i16) \
70 return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i16))
71/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
72#define IEM_MC_REL_JMP_S32_AND_FINISH(a_i32) \
73 return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i32), pVCpu->iem.s.enmEffOpSize)
74/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
75#define IEM_MC_SET_RIP_U16_AND_FINISH(a_u16NewIP) \
76 return iemRegRipJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP), IEM_GET_INSTR_LEN(pVCpu))
77/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
78#define IEM_MC_SET_RIP_U32_AND_FINISH(a_u32NewIP) \
79 return iemRegRipJumpU32AndFinishClearingRF((pVCpu), (a_u32NewIP), IEM_GET_INSTR_LEN(pVCpu))
80/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
81#define IEM_MC_SET_RIP_U64_AND_FINISH(a_u64NewIP) \
82 return iemRegRipJumpU64AndFinishClearingRF((pVCpu), (a_u64NewIP), IEM_GET_INSTR_LEN(pVCpu))
83
84/** Sets RIP (may trigger \#GP), finishes the instruction and returns.
85 * @note only usable in 16-bit op size mode. */
86#define IEM_MC_REL_CALL_S16_AND_FINISH(a_i16) \
87 return iemRegRipRelativeCallS16AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i16))
88/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
89#define IEM_MC_REL_CALL_S32_AND_FINISH(a_i32) \
90 return iemRegEip32RelativeCallS32AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i32))
91/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
92#define IEM_MC_REL_CALL_S64_AND_FINISH(a_i64) \
93 return iemRegRip64RelativeCallS64AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i64))
94/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
95#define IEM_MC_IND_CALL_U16_AND_FINISH(a_u16NewIP) \
96 return iemRegIp16IndirectCallU16AndFinishClearingRF((pVCpu), IEM_GET_INSTR_LEN(pVCpu), (a_u16NewIP))
97/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
98#define IEM_MC_IND_CALL_U32_AND_FINISH(a_u32NewIP) \
99 return iemRegEip32IndirectCallU32AndFinishClearingRF((pVCpu), IEM_GET_INSTR_LEN(pVCpu), (a_u32NewIP))
100/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
101#define IEM_MC_IND_CALL_U64_AND_FINISH(a_u64NewIP) \
102 return iemRegRip64IndirectCallU64AndFinishClearingRF((pVCpu), IEM_GET_INSTR_LEN(pVCpu), (a_u64NewIP))
103
104
105/** Fetches the near return address from the stack, sets RIP and RSP (may trigger
106 * \#GP or \#SS), finishes the instruction and returns. */
107#define IEM_MC_RETN_AND_FINISH(a_u16Pop) \
108 return iemRegRipNearReturnAndFinishClearingRF((pVCpu), IEM_GET_INSTR_LEN(pVCpu), (a_u16Pop), pVCpu->iem.s.enmEffOpSize)
109
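/* Illustrative sketch (simplified): a short relative jump body just feeds the
   displacement fetched by the opcode decoder (here the hypothetical variable
   i8Imm) into one of the *_AND_FINISH statements above, which updates RIP,
   does the end-of-instruction work such as clearing RF, and returns:

        IEM_MC_BEGIN(0, 0);
        IEM_MC_REL_JMP_S8_AND_FINISH(i8Imm);
        IEM_MC_END();
*/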
110
111#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
112#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
113 do { \
114 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)))) \
115 { /* probable */ } \
116 else return iemRaiseDeviceNotAvailable(pVCpu); \
117 } while (0)
118#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
119 do { \
120 if (RT_LIKELY(!((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)))) \
121 { /* probable */ } \
122 else return iemRaiseDeviceNotAvailable(pVCpu); \
123 } while (0)
124#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
125 do { \
126 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES))) \
127 { /* probable */ } \
128 else return iemRaiseMathFault(pVCpu); \
129 } while (0)
130#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
131 do { \
132 /* Since none of the bits we compare from XCR0, CR4 and CR0 overlap, it can \
133 be reduced to a single compare branch in the more probable code path. */ \
134 if (RT_LIKELY( ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) \
135 | (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
136 | (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)) \
137 == (XSAVE_C_YMM | XSAVE_C_SSE | X86_CR4_OSXSAVE))) \
138 { /* probable */ } \
139 else if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
140 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)) \
141 return iemRaiseUndefinedOpcode(pVCpu); \
142 else \
143 return iemRaiseDeviceNotAvailable(pVCpu); \
144 } while (0)
145AssertCompile(!((XSAVE_C_YMM | XSAVE_C_SSE) & X86_CR4_OSXSAVE));
146AssertCompile(!((XSAVE_C_YMM | XSAVE_C_SSE) & X86_CR0_TS));
147AssertCompile(!(X86_CR4_OSXSAVE & X86_CR0_TS));
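/* The AssertCompile statements above document why the fast path works: the
   masked bits taken from XCR0, CR4 and CR0 are mutually disjoint, so OR-ing
   them into one word loses nothing and a single comparison against the
   expected combination covers the common "OSXSAVE + YMM + SSE enabled, TS
   clear" case.  Only the unlikely path re-tests the conditions individually
   to pick between #UD and #NM.  A stand-alone sketch of the same idea with
   hypothetical masks (not the IEM code):

        // fast path: one compare covers all three preconditions, valid only
        // because A_MASK, B_MASK and C_BAD share no bits.
        if (RT_LIKELY(((regA & A_MASK) | (regB & B_MASK) | (regC & C_BAD)) == (A_MASK | B_MASK)))
            return doWork();
        // slow path: work out which precondition actually failed.
        if ((regA & A_MASK) != A_MASK || !(regB & B_MASK))
            return raiseUndefinedOpcode();
        return raiseDeviceNotAvailable();
*/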
148#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
149 do { \
150 /* Since the CR4 and CR0 bits don't overlap, it can be reduced to a
151 single compare branch in the more probable code path. */ \
152 if (RT_LIKELY( ( (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
153 | (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR)) \
154 == X86_CR4_OSFXSR)) \
155 { /* likely */ } \
156 else if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
157 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR)) \
158 return iemRaiseUndefinedOpcode(pVCpu); \
159 else \
160 return iemRaiseDeviceNotAvailable(pVCpu); \
161 } while (0)
162AssertCompile(!((X86_CR0_EM | X86_CR0_TS) & X86_CR4_OSFXSR));
163#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
164 do { \
165 /* Since the two CR0 bits don't overlap with FSW.ES, this can be reduced to a
166 single compare branch in the more probable code path. */ \
167 if (RT_LIKELY(!( (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
168 | (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES)))) \
169 { /* probable */ } \
170 else if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
171 return iemRaiseUndefinedOpcode(pVCpu); \
172 else if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
173 return iemRaiseDeviceNotAvailable(pVCpu); \
174 else \
175 return iemRaiseMathFault(pVCpu); \
176 } while (0)
177AssertCompile(!((X86_CR0_EM | X86_CR0_TS) & X86_FSW_ES));
178/** @todo recomp: this one is slightly problematic as the recompiler doesn't
179 * count the CPL into the TB key. However it is safe enough for now, as
180 * it calls iemRaiseGeneralProtectionFault0 directly so no calls will be
181 * emitted for it. */
182#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
183 do { \
184 if (RT_LIKELY(IEM_GET_CPL(pVCpu) == 0)) { /* probable */ } \
185 else return iemRaiseGeneralProtectionFault0(pVCpu); \
186 } while (0)
187#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
188 do { \
189 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
190 else return iemRaiseGeneralProtectionFault0(pVCpu); \
191 } while (0)
192#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
193 do { \
194 if (RT_LIKELY( ((pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE) | IEM_GET_CPU_MODE(pVCpu)) \
195 == (X86_CR4_FSGSBASE | IEMMODE_64BIT))) \
196 { /* probable */ } \
197 else return iemRaiseUndefinedOpcode(pVCpu); \
198 } while (0)
199AssertCompile(X86_CR4_FSGSBASE > UINT8_MAX);
200#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
201 do { \
202 if (RT_LIKELY(IEM_IS_CANONICAL(a_u64Addr))) { /* likely */ } \
203 else return iemRaiseGeneralProtectionFault0(pVCpu); \
204 } while (0)
205#define IEM_MC_MAYBE_RAISE_SSE_AVX_SIMD_FP_OR_UD_XCPT() \
206 do { \
207 if (RT_LIKELY(( ~((pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
208 & (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)) \
209 { /* probable */ } \
210 else \
211 { \
212 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT) \
213 return iemRaiseSimdFpException(pVCpu); \
214 return iemRaiseUndefinedOpcode(pVCpu); \
215 } \
216 } while (0)
217
218
219#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
220#define IEM_MC_LOCAL_ASSIGN(a_Type, a_Name, a_Value) a_Type a_Name = (a_Value)
221#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
222#define IEM_MC_NOREF(a_Name) RT_NOREF_PV(a_Name) /* NOP/liveness hack */
223#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
224#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
225#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
226/** @note IEMAllInstPython.py duplicates the expansion. */
227#define IEM_MC_ARG_EFLAGS(a_Name, a_iArg) uint32_t const a_Name = pVCpu->cpum.GstCtx.eflags.u
228/** @note IEMAllInstPython.py duplicates the expansion. */
229#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
230 uint32_t a_Name = pVCpu->cpum.GstCtx.eflags.u; \
231 uint32_t *a_pName = &a_Name
232/** @note IEMAllInstPython.py duplicates the expansion. */
233#define IEM_MC_LOCAL_EFLAGS(a_Name) uint32_t a_Name = pVCpu->cpum.GstCtx.eflags.u
234#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
235 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
236#define IEM_MC_COMMIT_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput) do { \
237 AssertMsg((pVCpu->cpum.GstCtx.eflags.u & ~(a_fEflOutput)) == ((a_EFlags) & ~(a_fEflOutput)), \
238 ("eflags.u=%#x (%#x) vs %s=%#x (%#x) - diff %#x (a_fEflOutput=%#x)\n", \
239 pVCpu->cpum.GstCtx.eflags.u & ~(a_fEflOutput), pVCpu->cpum.GstCtx.eflags.u, #a_EFlags, \
240 (a_EFlags) & ~(a_fEflOutput), (a_EFlags), \
241 (pVCpu->cpum.GstCtx.eflags.u & ~(a_fEflOutput)) ^ ((a_EFlags) & ~(a_fEflOutput)), a_fEflOutput)); \
242 pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); \
243 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); \
244 } while (0)
245#define IEM_MC_COMMIT_EFLAGS_OPT(a_EFlags) IEM_MC_COMMIT_EFLAGS(a_EFlags)
246#define IEM_MC_COMMIT_EFLAGS_OPT_EX(a_EFlags, a_fEflInput, a_fEflOutput) IEM_MC_COMMIT_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput)
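/* Illustrative sketch (simplified): IEM_MC_ARG_LOCAL_EFLAGS pairs a local copy
   of EFLAGS with a pointer argument for the worker, and the result is written
   back with IEM_MC_COMMIT_EFLAGS once the worker has run:

        IEM_MC_ARG(uint16_t *,      pu16Dst, 0);
        IEM_MC_ARG(uint16_t,        u16Src,  1);
        IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
        ...
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
        IEM_MC_COMMIT_EFLAGS(EFlags);

   When interpreting this is just a local uint32_t plus a pointer to it; the
   recompiler additionally uses the declarations for liveness tracking (see
   the IEM_MC_NOREF note above). */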
247
248/** ASSUMES the source variable not used after this statement. */
249#define IEM_MC_ASSIGN_TO_SMALLER(a_VarDst, a_VarSrcEol) (a_VarDst) = (a_VarSrcEol)
250
251#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
252#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
253#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
254#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
255#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
256#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
257#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
258#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
259#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
260#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
261#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
262#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
263#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
264#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
265#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
266#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
267#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
268#define IEM_MC_FETCH_GREG_PAIR_U32(a_u64Dst, a_iGRegLo, a_iGRegHi) do { \
269 (a_u64Dst).s.Lo = iemGRegFetchU32(pVCpu, (a_iGRegLo)); \
270 (a_u64Dst).s.Hi = iemGRegFetchU32(pVCpu, (a_iGRegHi)); \
271 } while(0)
272#define IEM_MC_FETCH_GREG_PAIR_U64(a_u128Dst, a_iGRegLo, a_iGRegHi) do { \
273 (a_u128Dst).s.Lo = iemGRegFetchU64(pVCpu, (a_iGRegLo)); \
274 (a_u128Dst).s.Hi = iemGRegFetchU64(pVCpu, (a_iGRegHi)); \
275 } while(0)
276#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
277 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
278 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
279 } while (0)
280#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
281 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
282 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
283 } while (0)
284#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
285 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
286 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
287 } while (0)
288/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
289#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
290 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
291 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
292 } while (0)
293#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
294 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
295 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
296 } while (0)
297/** @note Not for IOPL or IF testing or modification. */
298#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
299#define IEM_MC_FETCH_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput) IEM_MC_FETCH_EFLAGS(a_EFlags)
300#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u /* (only LAHF) */
301#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.XState.x87.FSW
302#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.XState.x87.FCW
303
304#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
305#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
306#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
307#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
308#define IEM_MC_STORE_GREG_I64(a_iGReg, a_i64Value) *iemGRegRefI64(pVCpu, (a_iGReg)) = (a_i64Value)
309#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
310#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
311#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
312#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
313#define IEM_MC_STORE_GREG_PAIR_U32(a_iGRegLo, a_iGRegHi, a_u64Value) do { \
314 *iemGRegRefU64(pVCpu, (a_iGRegLo)) = (uint32_t)(a_u64Value).s.Lo; \
315 *iemGRegRefU64(pVCpu, (a_iGRegHi)) = (uint32_t)(a_u64Value).s.Hi; \
316 } while(0)
317#define IEM_MC_STORE_GREG_PAIR_U64(a_iGRegLo, a_iGRegHi, a_u128Value) do { \
318 *iemGRegRefU64(pVCpu, (a_iGRegLo)) = (uint64_t)(a_u128Value).s.Lo; \
319 *iemGRegRefU64(pVCpu, (a_iGRegHi)) = (uint64_t)(a_u128Value).s.Hi; \
320 } while(0)
321#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
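/* The 32-bit store intentionally goes through the 64-bit register reference:
   in 64-bit mode a 32-bit destination write zero-extends into the whole
   register, and writing the full qword models that in a single store.
   E.g. (hypothetical values):

        IEM_MC_STORE_GREG_U32(X86_GREG_xAX, UINT32_C(0x12345678));
        // RAX == 0x0000000012345678 afterwards; the upper half is cleared.
*/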
322
323/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
324#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
325 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
326 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
327 } while (0)
328#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
329 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
330 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
331 } while (0)
332#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
333 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
334
335
336#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
337#define IEM_MC_REF_GREG_U8_CONST(a_pu8Dst, a_iGReg) (a_pu8Dst) = (uint8_t const *)iemGRegRefU8( pVCpu, (a_iGReg))
338#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
339#define IEM_MC_REF_GREG_U16_CONST(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t const *)iemGRegRefU16(pVCpu, (a_iGReg))
340/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
341 * Use IEM_MC_CLEAR_HIGH_GREG_U64! */
342#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
343#define IEM_MC_REF_GREG_U32_CONST(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t const *)iemGRegRefU32(pVCpu, (a_iGReg))
344#define IEM_MC_REF_GREG_I32(a_pi32Dst, a_iGReg) (a_pi32Dst) = (int32_t *)iemGRegRefU32(pVCpu, (a_iGReg))
345#define IEM_MC_REF_GREG_I32_CONST(a_pi32Dst, a_iGReg) (a_pi32Dst) = (int32_t const *)iemGRegRefU32(pVCpu, (a_iGReg))
346#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
347#define IEM_MC_REF_GREG_U64_CONST(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t const *)iemGRegRefU64(pVCpu, (a_iGReg))
348#define IEM_MC_REF_GREG_I64(a_pi64Dst, a_iGReg) (a_pi64Dst) = (int64_t *)iemGRegRefU64(pVCpu, (a_iGReg))
349#define IEM_MC_REF_GREG_I64_CONST(a_pi64Dst, a_iGReg) (a_pi64Dst) = (int64_t const *)iemGRegRefU64(pVCpu, (a_iGReg))
350/** @note Not for IOPL or IF testing or modification.
351 * @note Must preserve any undefined bits, see CPUMX86EFLAGS! */
352#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.uBoth
353#define IEM_MC_REF_EFLAGS_EX(a_pEFlags, a_fEflInput, a_fEflOutput) IEM_MC_REF_EFLAGS(a_pEFlags)
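/* When a 32-bit register is modified through a reference instead of a store,
   the architectural zero extension has to be applied explicitly, as the @todo
   above notes.  Hypothetical usage:

        IEM_MC_ARG(uint32_t *,  pu32Dst, 0);
        IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xCX);
        ...                                         // worker updates *pu32Dst
        IEM_MC_CLEAR_HIGH_GREG_U64(X86_GREG_xCX);   // emulate the 64-bit mode zero extension
*/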
354
355#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
356#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
357 do { \
358 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
359 *pu32Reg += (a_u32Value); \
360 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
361 } while (0)
362#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
363
364#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u8Const) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u8Const)
365#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u8Const) \
366 do { \
367 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
368 *pu32Reg -= (a_u8Const); \
369 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
370 } while (0)
371#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u8Const) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u8Const)
372#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
373
374#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
375#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
376#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
377#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
378#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
379#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
380#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
381
382#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
383#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
384#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
385#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
386
387#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
388#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
389#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
390
391#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
392#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
393#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
394
395#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
396#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
397#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
398
399#define IEM_MC_SHR_LOCAL_U8(a_u8Local, a_cShift) do { (a_u8Local) >>= (a_cShift); } while (0)
400
401#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
402#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
403#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
404
405#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
406
407#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
408
409#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
410#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
411#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
412 do { \
413 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
414 *pu32Reg &= (a_u32Value); \
415 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
416 } while (0)
417#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
418
419#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
420#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
421#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
422 do { \
423 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
424 *pu32Reg |= (a_u32Value); \
425 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
426 } while (0)
427#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
428
429#define IEM_MC_BSWAP_LOCAL_U16(a_u16Local) (a_u16Local) = RT_BSWAP_U16((a_u16Local));
430#define IEM_MC_BSWAP_LOCAL_U32(a_u32Local) (a_u32Local) = RT_BSWAP_U32((a_u32Local));
431#define IEM_MC_BSWAP_LOCAL_U64(a_u64Local) (a_u64Local) = RT_BSWAP_U64((a_u64Local));
432
433/** @note Not for IOPL or IF modification. */
434#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
435/** @note Not for IOPL or IF modification. */
436#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
437/** @note Not for IOPL or IF modification. */
438#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
439
440#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.XState.x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
441
442/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
443#define IEM_MC_FPU_TO_MMX_MODE() do { \
444 iemFpuRotateStackSetTop(&pVCpu->cpum.GstCtx.XState.x87, 0); \
445 pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
446 pVCpu->cpum.GstCtx.XState.x87.FTW = 0xff; \
447 } while (0)
448
449/** Switches the FPU state from MMX mode (FSW.TOS=0, FTW=0xffff). */
450#define IEM_MC_FPU_FROM_MMX_MODE() do { \
451 iemFpuRotateStackSetTop(&pVCpu->cpum.GstCtx.XState.x87, 0); \
452 pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
453 pVCpu->cpum.GstCtx.XState.x87.FTW = 0; \
454 } while (0)
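/* The FTW values quoted in the two notes above use the full 16-bit x87 tag
   word encoding (00b = valid, 11b = empty per register), whereas the code
   stores the abridged FXSAVE/XSAVE tag byte (one bit per register, set =
   valid); 0xff and 0 in the macros above therefore express the same
   "all valid" / "all empty" states in the other encoding. */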
455
456#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
457 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx; } while (0)
458#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg, a_iDWord) \
459 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[a_iDWord]; } while (0)
460#define IEM_MC_FETCH_MREG_U16(a_u16Value, a_iMReg, a_iWord) \
461 do { (a_u16Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au16[a_iWord]; } while (0)
462#define IEM_MC_FETCH_MREG_U8(a_u8Value, a_iMReg, a_iByte) \
463 do { (a_u8Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au8[a_iByte]; } while (0)
464#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
465 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
466 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
467 } while (0)
468#define IEM_MC_STORE_MREG_U32(a_iMReg, a_iDword, a_u32Value) \
469 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[(a_iDword)] = (a_u32Value); \
470 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
471 } while (0)
472#define IEM_MC_STORE_MREG_U16(a_iMReg, a_iWord, a_u16Value) \
473 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au16[(a_iWord)] = (a_u16Value); \
474 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
475 } while (0)
476#define IEM_MC_STORE_MREG_U8(a_iMReg, a_iByte, a_u8Value) \
477 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au8[(a_iByte)] = (a_u8Value); \
478 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
479 } while (0)
480#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
481 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
482 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
483 } while (0)
484#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
485 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
486#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
487 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
488#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
489 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
490#define IEM_MC_MODIFIED_MREG(a_iMReg) \
491 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; } while (0)
492#define IEM_MC_MODIFIED_MREG_BY_REF(a_pu64Dst) \
493 do { ((uint32_t *)(a_pu64Dst))[2] = 0xffff; } while (0)
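/* MMX registers alias the low 64 bits of the x87 ST registers; writing 0xffff
   to au32[2] sets bits 79:64 (the sign/exponent field of the 80-bit register)
   to all ones, matching what real CPUs do on an MMX write, so the value reads
   back as a special/invalid operand if x87 code touches it later.  Hypothetical
   use of the reference + modified-marking pair:

        IEM_MC_ARG(uint64_t *,  pu64Dst, 0);
        IEM_MC_REF_MREG_U64(pu64Dst, 3);            // mm3
        ...                                         // worker updates *pu64Dst
        IEM_MC_MODIFIED_MREG_BY_REF(pu64Dst);
*/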
494
495#define IEM_MC_CLEAR_XREG_U32_MASK(a_iXReg, a_bMask) \
496 do { if ((a_bMask) & (1 << 0)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0] = 0; \
497 if ((a_bMask) & (1 << 1)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[1] = 0; \
498 if ((a_bMask) & (1 << 2)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[2] = 0; \
499 if ((a_bMask) & (1 << 3)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[3] = 0; \
500 } while (0)
501#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
502 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
503 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
504 } while (0)
505#define IEM_MC_FETCH_XREG_XMM(a_XmmValue, a_iXReg) \
506 do { (a_XmmValue).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
507 (a_XmmValue).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
508 } while (0)
509#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg, a_iQWord) \
510 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQWord)]; } while (0)
511#define IEM_MC_FETCH_XREG_R64(a_r64Value, a_iXReg, a_iQWord) \
512 do { (a_r64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[(a_iQWord)]; } while (0)
513#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg, a_iDWord) \
514 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDWord)]; } while (0)
515#define IEM_MC_FETCH_XREG_R32(a_r32Value, a_iXReg, a_iDWord) \
516 do { (a_r32Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[(a_iDWord)]; } while (0)
517#define IEM_MC_FETCH_XREG_U16(a_u16Value, a_iXReg, a_iWord) \
518 do { (a_u16Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au16[(a_iWord)]; } while (0)
519#define IEM_MC_FETCH_XREG_U8( a_u8Value, a_iXReg, a_iByte) \
520 do { (a_u8Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au8[(a_iByte)]; } while (0)
521#define IEM_MC_FETCH_XREG_PAIR_U128(a_Dst, a_iXReg1, a_iXReg2) \
522 do { (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
523 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
524 (a_Dst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[0]; \
525 (a_Dst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[1]; \
526 } while (0)
527#define IEM_MC_FETCH_XREG_PAIR_XMM(a_Dst, a_iXReg1, a_iXReg2) \
528 do { (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
529 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
530 (a_Dst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[0]; \
531 (a_Dst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[1]; \
532 } while (0)
533#define IEM_MC_FETCH_XREG_PAIR_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_iXReg2) \
534 do { (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
535 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
536 (a_Dst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[0]; \
537 (a_Dst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[1]; \
538 (a_Dst).u64Rax = pVCpu->cpum.GstCtx.rax; \
539 (a_Dst).u64Rdx = pVCpu->cpum.GstCtx.rdx; \
540 } while (0)
541#define IEM_MC_FETCH_XREG_PAIR_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_iXReg2) \
542 do { (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
543 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
544 (a_Dst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[0]; \
545 (a_Dst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[1]; \
546 (a_Dst).u64Rax = (int64_t)(int32_t)pVCpu->cpum.GstCtx.eax; \
547 (a_Dst).u64Rdx = (int64_t)(int32_t)pVCpu->cpum.GstCtx.edx; \
548 } while (0)
549#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
550 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
551 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
552 } while (0)
553#define IEM_MC_STORE_XREG_XMM(a_iXReg, a_XmmValue) \
554 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_XmmValue).au64[0]; \
555 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_XmmValue).au64[1]; \
556 } while (0)
557#define IEM_MC_STORE_XREG_XMM_U32(a_iXReg, a_iDword, a_XmmValue) \
558 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDword)] = (a_XmmValue).au32[(a_iDword)]; } while (0)
559#define IEM_MC_STORE_XREG_XMM_U64(a_iXReg, a_iQword, a_XmmValue) \
560 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQword)] = (a_XmmValue).au64[(a_iQword)]; } while (0)
561#define IEM_MC_STORE_XREG_U64(a_iXReg, a_iQword, a_u64Value) \
562 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQword)] = (a_u64Value); } while (0)
563#define IEM_MC_STORE_XREG_U32(a_iXReg, a_iDword, a_u32Value) \
564 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDword)] = (a_u32Value); } while (0)
565#define IEM_MC_STORE_XREG_U16(a_iXReg, a_iWord, a_u16Value) \
566 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au16[(a_iWord)] = (a_u16Value); } while (0)
567#define IEM_MC_STORE_XREG_U8(a_iXReg, a_iByte, a_u8Value) \
568 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au8[(a_iByte)] = (a_u8Value); } while (0)
569
570#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
571 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
572 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
573 } while (0)
574
575#define IEM_MC_STORE_XREG_U32_U128(a_iXReg, a_iDwDst, a_u128Value, a_iDwSrc) \
576 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDwDst)] = (a_u128Value).au32[(a_iDwSrc)]; } while (0)
577#define IEM_MC_STORE_XREG_R32(a_iXReg, a_r32Value) \
578 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[0] = (a_r32Value); } while (0)
579#define IEM_MC_STORE_XREG_R64(a_iXReg, a_r64Value) \
580 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[0] = (a_r64Value); } while (0)
581#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
582 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
583 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
584 } while (0)
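/* Illustrative sketch (simplified; register indices 0 and 1 stand in for the
   ones decoded from ModRM): a typical 128-bit media instruction body combines
   the exception check, the XMM reference statements and an assembly worker
   call:

        IEM_MC_BEGIN(0, 0);
        IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
        IEM_MC_PREPARE_SSE_USAGE();
        IEM_MC_ARG(PRTUINT128U,     pDst, 0);
        IEM_MC_ARG(PCRTUINT128U,    pSrc, 1);
        IEM_MC_REF_XREG_U128(pDst, 0);
        IEM_MC_REF_XREG_U128_CONST(pSrc, 1);
        IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_pxor_u128, pDst, pSrc);
        IEM_MC_ADVANCE_RIP_AND_FINISH();
        IEM_MC_END();
*/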
585
586#define IEM_MC_BROADCAST_XREG_U8_ZX_VLMAX(a_iXRegDst, a_u8Src) \
587 do { uintptr_t const iXRegDstTmp = (a_iXRegDst); \
588 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[0] = (a_u8Src); \
589 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[1] = (a_u8Src); \
590 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[2] = (a_u8Src); \
591 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[3] = (a_u8Src); \
592 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[4] = (a_u8Src); \
593 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[5] = (a_u8Src); \
594 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[6] = (a_u8Src); \
595 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[7] = (a_u8Src); \
596 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[8] = (a_u8Src); \
597 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[9] = (a_u8Src); \
598 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[10] = (a_u8Src); \
599 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[11] = (a_u8Src); \
600 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[12] = (a_u8Src); \
601 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[13] = (a_u8Src); \
602 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[14] = (a_u8Src); \
603 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[15] = (a_u8Src); \
604 IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
605 } while (0)
606#define IEM_MC_BROADCAST_XREG_U16_ZX_VLMAX(a_iXRegDst, a_u16Src) \
607 do { uintptr_t const iXRegDstTmp = (a_iXRegDst); \
608 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[0] = (a_u16Src); \
609 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[1] = (a_u16Src); \
610 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[2] = (a_u16Src); \
611 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[3] = (a_u16Src); \
612 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[4] = (a_u16Src); \
613 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[5] = (a_u16Src); \
614 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[6] = (a_u16Src); \
615 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[7] = (a_u16Src); \
616 IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
617 } while (0)
618#define IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX(a_iXRegDst, a_u32Src) \
619 do { uintptr_t const iXRegDstTmp = (a_iXRegDst); \
620 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[0] = (a_u32Src); \
621 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[1] = (a_u32Src); \
622 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[2] = (a_u32Src); \
623 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[3] = (a_u32Src); \
624 IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
625 } while (0)
626#define IEM_MC_BROADCAST_XREG_U64_ZX_VLMAX(a_iXRegDst, a_u64Src) \
627 do { uintptr_t const iXRegDstTmp = (a_iXRegDst); \
628 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au64[0] = (a_u64Src); \
629 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au64[1] = (a_u64Src); \
630 IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
631 } while (0)
632
633#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
634 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
635#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
636 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
637#define IEM_MC_REF_XREG_XMM_CONST(a_pXmmDst, a_iXReg) \
638 (a_pXmmDst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)])
639#define IEM_MC_REF_XREG_U32_CONST(a_pu32Dst, a_iXReg) \
640 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0])
641#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
642 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0])
643#define IEM_MC_REF_XREG_R32_CONST(a_pr32Dst, a_iXReg) \
644 (a_pr32Dst) = ((RTFLOAT32U const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[0])
645#define IEM_MC_REF_XREG_R64_CONST(a_pr64Dst, a_iXReg) \
646 (a_pr64Dst) = ((RTFLOAT64U const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[0])
647#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
648 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[0] \
649 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[0]; \
650 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[1] \
651 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[1]; \
652 } while (0)
653
654#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
655 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
656 (a_u32Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au32[0]; \
657 } while (0)
658#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc, a_iQWord) \
659 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
660 if ((a_iQWord) < 2) \
661 (a_u64Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[(a_iQWord)]; \
662 else \
663 (a_u64Dst) = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[(a_iQWord) - 2]; \
664 } while (0)
665#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc, a_iDQword) \
666 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
667 if ((a_iDQword) == 0) \
668 { \
669 (a_u128Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegSrcTmp)].au64[0]; \
670 (a_u128Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegSrcTmp)].au64[1]; \
671 } \
672 else \
673 { \
674 (a_u128Dst).au64[0] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegSrcTmp)].au64[0]; \
675 (a_u128Dst).au64[1] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegSrcTmp)].au64[1]; \
676 } \
677 } while (0)
678#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
679 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
680 (a_u256Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
681 (a_u256Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
682 (a_u256Dst).au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
683 (a_u256Dst).au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
684 } while (0)
685
686#define IEM_MC_STORE_YREG_U128(a_iYRegDst, a_iDQword, a_u128Value) \
687 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
688 if ((a_iDQword) == 0) \
689 { \
690 pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au64[0] = (a_u128Value).au64[0]; \
691 pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au64[1] = (a_u128Value).au64[1]; \
692 } \
693 else \
694 { \
695 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au64[0] = (a_u128Value).au64[0]; \
696 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au64[1] = (a_u128Value).au64[1]; \
697 } \
698 } while (0)
699
700#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
701#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
702 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
703 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
704 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = 0; \
705 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
706 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
707 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
708 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
709 } while (0)
710#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
711 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
712 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
713 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
714 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
715 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
716 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
717 } while (0)
718#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
719 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
720 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
721 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
722 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
723 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
724 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
725 } while (0)
726#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
727 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
728 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
729 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
730 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
731 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
732 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
733 } while (0)
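/* The _ZX_VLMAX stores model the AVX rule that a VEX-encoded write to an XMM
   or YMM register zeroes the destination all the way up to the implementation's
   maximum vector length; IEM_MC_CLEAR_YREG_128_UP and IEM_MC_INT_CLEAR_ZMM_256_UP
   handle bits 255:128 and everything above bit 255 respectively.  E.g.
   (u64Result is a hypothetical local):

        IEM_MC_STORE_YREG_U64_ZX_VLMAX(2, u64Result);   // ymm2[63:0] = result, rest of the register = 0
*/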
734#define IEM_MC_STORE_YREG_U32_U256(a_iYRegDst, a_iDwDst, a_u256Value, a_iDwSrc) \
735 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
736 if ((a_iDwDst) < 4) \
737 pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au32[(a_iDwDst)] = (a_u256Value).au32[(a_iDwSrc)]; \
738 else \
739 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au32[(a_iDwDst) - 4] = (a_u256Value).au32[(a_iDwSrc)]; \
740 } while (0)
741#define IEM_MC_STORE_YREG_U64_U256(a_iYRegDst, a_iQwDst, a_u256Value, a_iQwSrc) \
742 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
743 if ((a_iQwDst) < 2) \
744 pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au64[(a_iQwDst)] = (a_u256Value).au64[(a_iQwSrc)]; \
745 else \
746 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au64[(a_iQwDst) - 2] = (a_u256Value).au64[(a_iQwSrc)]; \
747 } while (0)
748#define IEM_MC_STORE_YREG_U64(a_iYRegDst, a_iQword, a_u64Value) \
749 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
750 if ((a_iQword) < 2) \
751 pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au64[(a_iQword)] = (a_u64Value); \
752 else \
753 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au64[(a_iQword) - 2] = (a_u64Value); \
754 } while (0)
755
756#define IEM_MC_BROADCAST_YREG_U8_ZX_VLMAX(a_iYRegDst, a_u8Src) \
757 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
758 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[0] = (a_u8Src); \
759 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[1] = (a_u8Src); \
760 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[2] = (a_u8Src); \
761 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[3] = (a_u8Src); \
762 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[4] = (a_u8Src); \
763 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[5] = (a_u8Src); \
764 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[6] = (a_u8Src); \
765 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[7] = (a_u8Src); \
766 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[8] = (a_u8Src); \
767 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[9] = (a_u8Src); \
768 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[10] = (a_u8Src); \
769 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[11] = (a_u8Src); \
770 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[12] = (a_u8Src); \
771 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[13] = (a_u8Src); \
772 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[14] = (a_u8Src); \
773 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[15] = (a_u8Src); \
774 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[0] = (a_u8Src); \
775 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[1] = (a_u8Src); \
776 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[2] = (a_u8Src); \
777 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[3] = (a_u8Src); \
778 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[4] = (a_u8Src); \
779 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[5] = (a_u8Src); \
780 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[6] = (a_u8Src); \
781 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[7] = (a_u8Src); \
782 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[8] = (a_u8Src); \
783 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[9] = (a_u8Src); \
784 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[10] = (a_u8Src); \
785 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[11] = (a_u8Src); \
786 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[12] = (a_u8Src); \
787 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[13] = (a_u8Src); \
788 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[14] = (a_u8Src); \
789 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[15] = (a_u8Src); \
790 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
791 } while (0)
792#define IEM_MC_BROADCAST_YREG_U16_ZX_VLMAX(a_iYRegDst, a_u16Src) \
793 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
794 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[0] = (a_u16Src); \
795 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[1] = (a_u16Src); \
796 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[2] = (a_u16Src); \
797 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[3] = (a_u16Src); \
798 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[4] = (a_u16Src); \
799 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[5] = (a_u16Src); \
800 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[6] = (a_u16Src); \
801 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[7] = (a_u16Src); \
802 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[0] = (a_u16Src); \
803 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[1] = (a_u16Src); \
804 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[2] = (a_u16Src); \
805 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[3] = (a_u16Src); \
806 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[4] = (a_u16Src); \
807 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[5] = (a_u16Src); \
808 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[6] = (a_u16Src); \
809 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[7] = (a_u16Src); \
810 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
811 } while (0)
812#define IEM_MC_BROADCAST_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
813 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
814 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
815 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = (a_u32Src); \
816 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[2] = (a_u32Src); \
817 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[3] = (a_u32Src); \
818 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[0] = (a_u32Src); \
819 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[1] = (a_u32Src); \
820 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[2] = (a_u32Src); \
821 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[3] = (a_u32Src); \
822 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
823 } while (0)
824#define IEM_MC_BROADCAST_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
825 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
826 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
827 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u64Src); \
828 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u64Src); \
829 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u64Src); \
830 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
831 } while (0)
832#define IEM_MC_BROADCAST_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
833 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
834 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
835 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
836 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
837 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
838 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
839 } while (0)
840
841#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
842 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
843#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
844 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
845#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
846 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].au64[0])
847#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
848 do { uintptr_t const iYRegTmp = (a_iYReg); \
849 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
850 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
851 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegTmp); \
852 } while (0)
853
854#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
855 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
856 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
857 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
858 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
859 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
860 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
861 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
862 } while (0)
863#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
864 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
865 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
866 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
867 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
868 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
869 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
870 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
871 } while (0)
872#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
873 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
874 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
875 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
876 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
877 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
878 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
879 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
880 } while (0)
881
882#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
883 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
884 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
885 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
886 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc32Tmp].au32[0]; \
887 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au32[1]; \
888 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
889 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
890 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
891 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
892 } while (0)
893#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
894 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
895 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
896 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
897 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
898 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
899 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
900 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
901 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
902 } while (0)
903#define IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovlhps */ \
904 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
905 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
906 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
907 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
908 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[0]; \
909 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
910 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
911 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
912 } while (0)
913#define IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
914 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
915 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
916 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
917 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[1]; \
918 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
919 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
920 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
921 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
922 } while (0)
923#define IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX(a_iYRegDst, a_iYRegSrcHx, a_u64Local) \
924 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
925 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
926 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[0]; \
927 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u64Local); \
928 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
929 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
930 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
931 } while (0)
932#define IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
933 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
934 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
935 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
936 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
937 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
938 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
939 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
940 } while (0)
941
942#define IEM_MC_CLEAR_ZREG_256_UP(a_iYReg) \
943 do { IEM_MC_INT_CLEAR_ZMM_256_UP(a_iYReg); } while (0)
944
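/*
 * Illustrative usage sketch (assumptions: the register indexes are made up and
 * the AVX availability/usage checks a real decoder body performs first are
 * omitted).  The *_ZX_VLMAX copy/merge macros above implement the VEX rule
 * that a 128-bit destination write zeroes YMM bits 255:128, with
 * IEM_MC_INT_CLEAR_ZMM_256_UP taking care of bits 511:256.  A vmovsd
 * xmm0, xmm1, xmm2 style register merge could then look like:
 *
 *     IEM_MC_BEGIN(0, 0);
 *     IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(0, 2, 1);
 *     IEM_MC_ADVANCE_RIP_AND_FINISH();
 *     IEM_MC_END();
 */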
945#ifndef IEM_WITH_SETJMP
946# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
947 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
948# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
949 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
950# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
951 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
952#else
953# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
954 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
955# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
956 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
957# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
958 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
959
960# define IEM_MC_FETCH_MEM_FLAT_U8(a_u8Dst, a_GCPtrMem) \
961 ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
962# define IEM_MC_FETCH_MEM16_FLAT_U8(a_u8Dst, a_GCPtrMem16) \
963 ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem16)))
964# define IEM_MC_FETCH_MEM32_FLAT_U8(a_u8Dst, a_GCPtrMem32) \
965 ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem32)))
966#endif
967
968#ifndef IEM_WITH_SETJMP
969# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
970 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
971# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
972 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
973# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
974 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
975#else
976# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
977 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
978# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
979 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
980# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
981 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
982
983# define IEM_MC_FETCH_MEM_FLAT_U16(a_u16Dst, a_GCPtrMem) \
984 ((a_u16Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
985# define IEM_MC_FETCH_MEM_FLAT_U16_DISP(a_u16Dst, a_GCPtrMem, a_offDisp) \
986 ((a_u16Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
987# define IEM_MC_FETCH_MEM_FLAT_I16(a_i16Dst, a_GCPtrMem) \
988 ((a_i16Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
989#endif
990
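/*
 * Illustrative sketch of how both halves of the #ifdef above are consumed
 * (hypothetical locals; IEM_MC_LOCAL and the effective-address calculation of
 * a real decoder body are assumed to come from elsewhere in this file):
 *
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_MEM_U16(u16Value, X86_SREG_DS, GCPtrEffSrc);
 *
 * Without IEM_WITH_SETJMP the worker returns a VBOXSTRICTRC which
 * IEM_MC_RETURN_ON_FAILURE propagates to the caller; with IEM_WITH_SETJMP the
 * worker longjmps on any fault and the macro is a plain assignment, which is
 * the shape the native recompiler consumes.
 */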
991#ifndef IEM_WITH_SETJMP
992# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
993 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
994# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
995 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
996# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
997 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
998#else
999# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1000 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1001# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
1002 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
1003# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
1004 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1005
1006# define IEM_MC_FETCH_MEM_FLAT_U32(a_u32Dst, a_GCPtrMem) \
1007 ((a_u32Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
1008# define IEM_MC_FETCH_MEM_FLAT_U32_DISP(a_u32Dst, a_GCPtrMem, a_offDisp) \
1009 ((a_u32Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
1010# define IEM_MC_FETCH_MEM_FLAT_I32(a_i32Dst, a_GCPtrMem) \
1011 ((a_i32Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
1012#endif
1013
1014#ifndef IEM_WITH_SETJMP
1015# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1016 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
1017# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
1018 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
1019# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
1020 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
1021# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
1022 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
1023#else
1024# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1025 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1026# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
1027 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
1028# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
1029 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1030# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
1031 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1032
1033# define IEM_MC_FETCH_MEM_FLAT_U64(a_u64Dst, a_GCPtrMem) \
1034 ((a_u64Dst) = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
1035# define IEM_MC_FETCH_MEM_FLAT_U64_DISP(a_u64Dst, a_GCPtrMem, a_offDisp) \
1036 ((a_u64Dst) = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
1037# define IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128(a_u64Dst, a_GCPtrMem) \
1038 ((a_u64Dst) = iemMemFlatFetchDataU64AlignedU128Jmp(pVCpu, (a_GCPtrMem)))
1039# define IEM_MC_FETCH_MEM_FLAT_I64(a_i64Dst, a_GCPtrMem) \
1040 ((a_i64Dst) = (int64_t)iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
1041#endif
1042
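/*
 * Illustrative note on the *_DISP variants above: they fold a constant byte
 * displacement into an already calculated effective address, so for instance a
 * 16-byte operand can be read as two qwords from one address calculation.
 * Sketch with hypothetical locals u64Lo/u64Hi, iEffSeg and GCPtrEffSrc:
 *
 *     IEM_MC_FETCH_MEM_U64(u64Lo, iEffSeg, GCPtrEffSrc);
 *     IEM_MC_FETCH_MEM_U64_DISP(u64Hi, iEffSeg, GCPtrEffSrc, 8);
 */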
1043#ifndef IEM_WITH_SETJMP
1044# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
1045 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u, (a_iSeg), (a_GCPtrMem)))
1046# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
1047 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).u, (a_iSeg), (a_GCPtrMem)))
1048# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
1049 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
1050# define IEM_MC_FETCH_MEM_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \
1051 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataD80(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem)))
1052#else
1053# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
1054 ((a_r32Dst).u = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1055# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
1056 ((a_r64Dst).u = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1057# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
1058 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
1059# define IEM_MC_FETCH_MEM_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \
1060 iemMemFetchDataD80Jmp(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem))
1061
1062# define IEM_MC_FETCH_MEM_FLAT_R32(a_r32Dst, a_GCPtrMem) \
1063 ((a_r32Dst).u = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
1064# define IEM_MC_FETCH_MEM_FLAT_R64(a_r64Dst, a_GCPtrMem) \
1065 ((a_r64Dst).u = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
1066# define IEM_MC_FETCH_MEM_FLAT_R80(a_r80Dst, a_GCPtrMem) \
1067 iemMemFlatFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_GCPtrMem))
1068# define IEM_MC_FETCH_MEM_FLAT_D80(a_d80Dst, a_GCPtrMem) \
1069 iemMemFlatFetchDataD80Jmp(pVCpu, &(a_d80Dst), (a_GCPtrMem))
1070#endif
1071
1072#ifndef IEM_WITH_SETJMP
1073# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
1074 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
1075# define IEM_MC_FETCH_MEM_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \
1076 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128NoAc(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
1077# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
1078 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
1079
1080# define IEM_MC_FETCH_MEM_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \
1081 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)))
1082# define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \
1083 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)))
1084
1086# define IEM_MC_FETCH_MEM_U128_NO_AC_AND_XREG_U128(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1086 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128NoAc(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2))); \
1087 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1088 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1089 } while (0)
1090
1091# define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1092 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_Dst).uSrc2.uXmm, (a_iSeg2), (a_GCPtrMem2))); \
1093 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1094 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1095 } while (0)
1096
1097# define IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_iSeg2, a_GCPtrMem2) do { \
1098 (a_Dst).uSrc2.uXmm.au64[0] = 0; \
1099 (a_Dst).uSrc2.uXmm.au64[1] = 0; \
1100 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_Dst).uSrc2.uXmm.au32[(a_iDWord2)], (a_iSeg2), (a_GCPtrMem2))); \
1101 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1102 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1103 } while (0)
1104
1105# define IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_iSeg2, a_GCPtrMem2) do { \
1106 (a_Dst).uSrc2.uXmm.au64[!(a_iQWord2)] = 0; \
1107 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_Dst).uSrc2.uXmm.au64[(a_iQWord2)], (a_iSeg2), (a_GCPtrMem2))); \
1108 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1109 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1110 } while (0)
1111
1112# define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1113 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2))); \
1114 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1115 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1116 (a_Dst).u64Rax = pVCpu->cpum.GstCtx.rax; \
1117 (a_Dst).u64Rdx = pVCpu->cpum.GstCtx.rdx; \
1118 } while (0)
1119# define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1120 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2))); \
1121 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1122 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1123 (a_Dst).u64Rax = (int64_t)(int32_t)pVCpu->cpum.GstCtx.eax; \
1124 (a_Dst).u64Rdx = (int64_t)(int32_t)pVCpu->cpum.GstCtx.edx; \
1125 } while (0)
1126
1127#else
1128# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
1129 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
1130# define IEM_MC_FETCH_MEM_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \
1131 iemMemFetchDataU128NoAcJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
1132# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
1133 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
1134
1135# define IEM_MC_FETCH_MEM_XMM(a_XmmDst, a_iSeg, a_GCPtrMem) \
1136 iemMemFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
1137# define IEM_MC_FETCH_MEM_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \
1138 iemMemFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
1139# define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \
1140 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
1141
1142# define IEM_MC_FETCH_MEM_FLAT_U128(a_u128Dst, a_GCPtrMem) \
1143 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
1144# define IEM_MC_FETCH_MEM_FLAT_U128_NO_AC(a_u128Dst, a_GCPtrMem) \
1145 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
1146# define IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE(a_u128Dst, a_GCPtrMem) \
1147 iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
1148
1149# define IEM_MC_FETCH_MEM_FLAT_XMM(a_XmmDst, a_GCPtrMem) \
1150 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem))
1151# define IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC(a_XmmDst, a_GCPtrMem) \
1152 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem))
1153# define IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE(a_XmmDst, a_GCPtrMem) \
1154 iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem))
1155
1156# define IEM_MC_FETCH_MEM_U128_AND_XREG_U128(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1157 iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \
1158 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1159 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1160 } while (0)
1161# define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
1162 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \
1163 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1164 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1165 } while (0)
1166
1167# define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1168 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_iSeg2), (a_GCPtrMem2)); \
1169 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1170 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1171 } while (0)
1172# define IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
1173 iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_GCPtrMem2)); \
1174 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1175 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1176 } while (0)
1177
1178# define IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_iSeg2, a_GCPtrMem2) do { \
1179 (a_Dst).uSrc2.uXmm.au64[0] = 0; \
1180 (a_Dst).uSrc2.uXmm.au64[1] = 0; \
1181 (a_Dst).uSrc2.uXmm.au32[(a_iDWord2)] = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg2), (a_GCPtrMem2)); \
1182 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1183 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1184 } while (0)
1185# define IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_GCPtrMem2) do { \
1186 (a_Dst).uSrc2.uXmm.au64[0] = 0; \
1187 (a_Dst).uSrc2.uXmm.au64[1] = 0; \
1188 (a_Dst).uSrc2.uXmm.au32[(a_iDWord2)] = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem2)); \
1189 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1190 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1191 } while (0)
1192
1193# define IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_iSeg2, a_GCPtrMem2) do { \
1194 (a_Dst).uSrc2.uXmm.au64[!(a_iQWord2)] = 0; \
1195 (a_Dst).uSrc2.uXmm.au64[(a_iQWord2)] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg2), (a_GCPtrMem2)); \
1196 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1197 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1198 } while (0)
1199# define IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_GCPtrMem2) do { \
1200 (a_Dst).uSrc2.uXmm.au64[!(a_iQWord2)] = 0; \
1201 (a_Dst).uSrc2.uXmm.au64[(a_iQWord2)] = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem2)); \
1202 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1203 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1204 } while (0)
1205
1206
1207# define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1208 iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \
1209 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1210 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1211 (a_Dst).u64Rax = pVCpu->cpum.GstCtx.rax; \
1212 (a_Dst).u64Rdx = pVCpu->cpum.GstCtx.rdx; \
1213 } while (0)
1214# define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1215 iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \
1216 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1217 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1218 (a_Dst).u64Rax = (int64_t)(int32_t)pVCpu->cpum.GstCtx.eax; \
1219 (a_Dst).u64Rdx = (int64_t)(int32_t)pVCpu->cpum.GstCtx.edx; \
1220 } while (0)
1221
1222# define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
1223 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \
1224 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1225 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1226 (a_Dst).u64Rax = pVCpu->cpum.GstCtx.rax; \
1227 (a_Dst).u64Rdx = pVCpu->cpum.GstCtx.rdx; \
1228 } while (0)
1229# define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
1230 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \
1231 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1232 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1233 (a_Dst).u64Rax = (int64_t)(int32_t)pVCpu->cpum.GstCtx.eax; \
1234 (a_Dst).u64Rdx = (int64_t)(int32_t)pVCpu->cpum.GstCtx.edx; \
1235 } while (0)
1236
1237#endif
1238
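/*
 * Illustrative note on the ..._AND_XREG_... macros above: they gather every
 * input of an SSE instruction (the memory operand, an XMM register and, for
 * the RAX/RDX flavours, the implicit length registers) into one structure so
 * the decoder body can hand a single pointer to the C worker.  Hedged sketch,
 * with Src, iXReg1, iEffSeg and GCPtrEffSrc standing in for the decoder's
 * locals and the destination struct supplying the uSrc1/uSrc2/u64Rax/u64Rdx
 * members these macros write:
 *
 *     IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64(Src, iXReg1, iEffSeg, GCPtrEffSrc);
 */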
1239#ifndef IEM_WITH_SETJMP
1240# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
1241 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
1242# define IEM_MC_FETCH_MEM_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \
1243 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
1244# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
1245 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedAvx(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
1246
1247# define IEM_MC_FETCH_MEM_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \
1248 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
1249# define IEM_MC_FETCH_MEM_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \
1250 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
1251# define IEM_MC_FETCH_MEM_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \
1252 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedAvx(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
1253#else
1254# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
1255 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
1256# define IEM_MC_FETCH_MEM_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \
1257 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
1258# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
1259 iemMemFetchDataU256AlignedAvxJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
1260
1261# define IEM_MC_FETCH_MEM_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \
1262 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
1263# define IEM_MC_FETCH_MEM_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \
1264 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
1265# define IEM_MC_FETCH_MEM_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \
1266 iemMemFetchDataU256AlignedAvxJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
1267
1268# define IEM_MC_FETCH_MEM_FLAT_U256(a_u256Dst, a_GCPtrMem) \
1269 iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem))
1270# define IEM_MC_FETCH_MEM_FLAT_U256_NO_AC(a_u256Dst, a_GCPtrMem) \
1271 iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem))
1272# define IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX(a_u256Dst, a_GCPtrMem) \
1273 iemMemFlatFetchDataU256AlignedAvxJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem))
1274
1275# define IEM_MC_FETCH_MEM_FLAT_YMM(a_YmmDst, a_GCPtrMem) \
1276 iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_GCPtrMem))
1277# define IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC(a_YmmDst, a_GCPtrMem) \
1278 iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_GCPtrMem))
1279# define IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX(a_YmmDst, a_GCPtrMem) \
1280 iemMemFlatFetchDataU256AlignedAvxJmp(pVCpu, &(a_YmmDst).ymm, (a_GCPtrMem))
1281#endif
1282
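/*
 * Illustrative note: as the names suggest, the 256-bit _ALIGN_AVX fetchers
 * enforce the operand alignment a vmovdqa-style access requires (faulting on a
 * misaligned address), while the _NO_AC ones do not enforce it, as needed for
 * vmovdqu-style accesses.  Hedged sketch with hypothetical locals:
 *
 *     IEM_MC_FETCH_MEM_U256_ALIGN_AVX(u256Aligned, iEffSeg, GCPtrEffSrc);
 *     IEM_MC_FETCH_MEM_U256_NO_AC(u256Unaligned, iEffSeg, GCPtrEffSrc);
 */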
1283
1284
1285#ifndef IEM_WITH_SETJMP
1286# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
1287 do { \
1288 uint8_t u8Tmp; \
1289 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1290 (a_u16Dst) = u8Tmp; \
1291 } while (0)
1292# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1293 do { \
1294 uint8_t u8Tmp; \
1295 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1296 (a_u32Dst) = u8Tmp; \
1297 } while (0)
1298# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1299 do { \
1300 uint8_t u8Tmp; \
1301 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1302 (a_u64Dst) = u8Tmp; \
1303 } while (0)
1304# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1305 do { \
1306 uint16_t u16Tmp; \
1307 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
1308 (a_u32Dst) = u16Tmp; \
1309 } while (0)
1310# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1311 do { \
1312 uint16_t u16Tmp; \
1313 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
1314 (a_u64Dst) = u16Tmp; \
1315 } while (0)
1316# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1317 do { \
1318 uint32_t u32Tmp; \
1319 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
1320 (a_u64Dst) = u32Tmp; \
1321 } while (0)
1322#else /* IEM_WITH_SETJMP */
1323# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
1324 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1325# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1326 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1327# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1328 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1329# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1330 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1331# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1332 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1333# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1334 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1335
1336# define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16(a_u16Dst, a_GCPtrMem) \
1337 ((a_u16Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1338# define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32(a_u32Dst, a_GCPtrMem) \
1339 ((a_u32Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1340# define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64(a_u64Dst, a_GCPtrMem) \
1341 ((a_u64Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1342# define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32(a_u32Dst, a_GCPtrMem) \
1343 ((a_u32Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1344# define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64(a_u64Dst, a_GCPtrMem) \
1345 ((a_u64Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1346# define IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64(a_u64Dst, a_GCPtrMem) \
1347 ((a_u64Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
1348#endif /* IEM_WITH_SETJMP */
1349
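/*
 * Illustrative note: the _ZX_ fetchers above implement movzx-style loads; the
 * narrow unsigned value is assigned straight to the wider destination, so the
 * upper bits end up zero.  Hedged sketch for a movzx eax, byte [mem] style
 * load with hypothetical locals:
 *
 *     IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, iEffSeg, GCPtrEffSrc);
 */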
1350#ifndef IEM_WITH_SETJMP
1351# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
1352 do { \
1353 uint8_t u8Tmp; \
1354 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1355 (a_u16Dst) = (int8_t)u8Tmp; \
1356 } while (0)
1357# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1358 do { \
1359 uint8_t u8Tmp; \
1360 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1361 (a_u32Dst) = (int8_t)u8Tmp; \
1362 } while (0)
1363# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1364 do { \
1365 uint8_t u8Tmp; \
1366 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1367 (a_u64Dst) = (int8_t)u8Tmp; \
1368 } while (0)
1369# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1370 do { \
1371 uint16_t u16Tmp; \
1372 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
1373 (a_u32Dst) = (int16_t)u16Tmp; \
1374 } while (0)
1375# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1376 do { \
1377 uint16_t u16Tmp; \
1378 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
1379 (a_u64Dst) = (int16_t)u16Tmp; \
1380 } while (0)
1381# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1382 do { \
1383 uint32_t u32Tmp; \
1384 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
1385 (a_u64Dst) = (int32_t)u32Tmp; \
1386 } while (0)
1387#else /* IEM_WITH_SETJMP */
1388# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
1389 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1390# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1391 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1392# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1393 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1394# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1395 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1396# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1397 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1398# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1399 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1400
1401# define IEM_MC_FETCH_MEM_FLAT_U8_SX_U16(a_u16Dst, a_GCPtrMem) \
1402 ((a_u16Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1403# define IEM_MC_FETCH_MEM_FLAT_U8_SX_U32(a_u32Dst, a_GCPtrMem) \
1404 ((a_u32Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1405# define IEM_MC_FETCH_MEM_FLAT_U8_SX_U64(a_u64Dst, a_GCPtrMem) \
1406 ((a_u64Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1407# define IEM_MC_FETCH_MEM_FLAT_U16_SX_U32(a_u32Dst, a_GCPtrMem) \
1408 ((a_u32Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1409# define IEM_MC_FETCH_MEM_FLAT_U16_SX_U64(a_u64Dst, a_GCPtrMem) \
1410 ((a_u64Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1411# define IEM_MC_FETCH_MEM_FLAT_U32_SX_U64(a_u64Dst, a_GCPtrMem) \
1412 ((a_u64Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
1413#endif /* IEM_WITH_SETJMP */
1414
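/*
 * Illustrative note: the _SX_ fetchers implement movsx-style loads by casting
 * the fetched value to the signed type of the source width; the implicit
 * conversion back to the wider unsigned destination then replicates the sign
 * bit.  Hedged sketch for a movsx rax, word [mem] style load:
 *
 *     IEM_MC_FETCH_MEM_U16_SX_U64(u64Value, iEffSeg, GCPtrEffSrc);
 */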
1415#ifndef IEM_WITH_SETJMP
1416# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
1417 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
1418# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
1419 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
1420# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
1421 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
1422# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
1423 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
1424#else
1425# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
1426 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
1427# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
1428 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
1429# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
1430 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
1431# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
1432 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
1433
1434# define IEM_MC_STORE_MEM_FLAT_U8(a_GCPtrMem, a_u8Value) \
1435 iemMemFlatStoreDataU8Jmp(pVCpu, (a_GCPtrMem), (a_u8Value))
1436# define IEM_MC_STORE_MEM_FLAT_U16(a_GCPtrMem, a_u16Value) \
1437 iemMemFlatStoreDataU16Jmp(pVCpu, (a_GCPtrMem), (a_u16Value))
1438# define IEM_MC_STORE_MEM_FLAT_U32(a_GCPtrMem, a_u32Value) \
1439 iemMemFlatStoreDataU32Jmp(pVCpu, (a_GCPtrMem), (a_u32Value))
1440# define IEM_MC_STORE_MEM_FLAT_U64(a_GCPtrMem, a_u64Value) \
1441 iemMemFlatStoreDataU64Jmp(pVCpu, (a_GCPtrMem), (a_u64Value))
1442#endif
1443
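/*
 * Illustrative note: the store macros mirror the fetchers above, and as can be
 * seen the FLAT variants are only defined for the setjmp build.  Hedged sketch
 * for a mov word [mem], ax style store with hypothetical locals:
 *
 *     IEM_MC_STORE_MEM_U16(iEffSeg, GCPtrEffDst, u16Value);
 */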
1444#ifndef IEM_WITH_SETJMP
1445# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
1446 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
1447# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
1448 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
1449# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
1450 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
1451# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
1452 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
1453#else
1454# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
1455 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
1456# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
1457 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
1458# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
1459 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
1460# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
1461 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
1462
1463# define IEM_MC_STORE_MEM_FLAT_U8_CONST(a_GCPtrMem, a_u8C) \
1464 iemMemFlatStoreDataU8Jmp(pVCpu, (a_GCPtrMem), (a_u8C))
1465# define IEM_MC_STORE_MEM_FLAT_U16_CONST(a_GCPtrMem, a_u16C) \
1466 iemMemFlatStoreDataU16Jmp(pVCpu, (a_GCPtrMem), (a_u16C))
1467# define IEM_MC_STORE_MEM_FLAT_U32_CONST(a_GCPtrMem, a_u32C) \
1468 iemMemFlatStoreDataU32Jmp(pVCpu, (a_GCPtrMem), (a_u32C))
1469# define IEM_MC_STORE_MEM_FLAT_U64_CONST(a_GCPtrMem, a_u64C) \
1470 iemMemFlatStoreDataU64Jmp(pVCpu, (a_GCPtrMem), (a_u64C))
1471#endif
1472
1473#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
1474#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
1475#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
1476#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
1477#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u = UINT32_C(0xffc00000)
1478#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->u = UINT64_C(0xfff8000000000000)
1479#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
1480 do { \
1481 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
1482 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
1483 } while (0)
1484#define IEM_MC_STORE_MEM_INDEF_D80_BY_REF(a_pd80Dst) \
1485 do { \
1486 (a_pd80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
1487 (a_pd80Dst)->au16[4] = UINT16_C(0xffff); \
1488 } while (0)
1489
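/*
 * Illustrative breakdown of the constants used just above (standard IEEE-754
 * encodings, added for reference): 0xffc00000 is the single-precision negative
 * QNaN (sign 1, exponent 0xff, quiet bit set), 0xfff8000000000000 is the
 * double-precision equivalent, and the 80-bit real / d80 "indefinite" is the
 * 16-bit pattern 0xffff (sign plus maximum exponent) over the mantissa
 * 0xc000000000000000 (integer bit plus quiet bit), which is exactly what the
 * two assignments in each of those macros build.
 */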
1490#ifndef IEM_WITH_SETJMP
1491# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
1492 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value)))
1493# define IEM_MC_STORE_MEM_U128_NO_AC(a_iSeg, a_GCPtrMem, a_u128Value) \
1494 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128NoAc(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value)))
1495# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
1496 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
1497#else
1498# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
1499 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value))
1500# define IEM_MC_STORE_MEM_U128_NO_AC(a_iSeg, a_GCPtrMem, a_u128Value) \
1501 iemMemStoreDataU128NoAcJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value))
1502# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
1503 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
1504
1505# define IEM_MC_STORE_MEM_FLAT_U128(a_GCPtrMem, a_u128Value) \
1506 iemMemFlatStoreDataU128Jmp(pVCpu, (a_GCPtrMem), &(a_u128Value))
1507# define IEM_MC_STORE_MEM_FLAT_U128_NO_AC(a_GCPtrMem, a_u128Value) \
1508 iemMemFlatStoreDataU128NoAcJmp(pVCpu, (a_GCPtrMem), &(a_u128Value))
1509# define IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE(a_GCPtrMem, a_u128Value) \
1510 iemMemStoreDataU128AlignedSseJmp(pVCpu, UINT8_MAX, (a_GCPtrMem), (a_u128Value))
1511#endif
1512
1513#ifndef IEM_WITH_SETJMP
1514# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
1515 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
1516# define IEM_MC_STORE_MEM_U256_NO_AC(a_iSeg, a_GCPtrMem, a_u256Value) \
1517 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256NoAc(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
1518# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
1519 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
1520#else
1521# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
1522 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
1523# define IEM_MC_STORE_MEM_U256_NO_AC(a_iSeg, a_GCPtrMem, a_u256Value) \
1524 iemMemStoreDataU256NoAcJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
1525# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
1526 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
1527
1528# define IEM_MC_STORE_MEM_FLAT_U256(a_GCPtrMem, a_u256Value) \
1529 iemMemFlatStoreDataU256Jmp(pVCpu, (a_GCPtrMem), &(a_u256Value))
1530# define IEM_MC_STORE_MEM_FLAT_U256_NO_AC(a_GCPtrMem, a_u256Value) \
1531 iemMemFlatStoreDataU256NoAcJmp(pVCpu, (a_GCPtrMem), &(a_u256Value))
1532# define IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX(a_GCPtrMem, a_u256Value) \
1533 iemMemFlatStoreDataU256AlignedAvxJmp(pVCpu, (a_GCPtrMem), &(a_u256Value))
1534#endif
1535
1536/* Regular stack push and pop: */
1537#ifndef IEM_WITH_SETJMP
1538# define IEM_MC_PUSH_U16(a_u16Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
1539# define IEM_MC_PUSH_U32(a_u32Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
1540# define IEM_MC_PUSH_U32_SREG(a_uSegVal) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_uSegVal)))
1541# define IEM_MC_PUSH_U64(a_u64Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
1542
1543# define IEM_MC_POP_GREG_U16(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU16(pVCpu, (a_iGReg)))
1544# define IEM_MC_POP_GREG_U32(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU32(pVCpu, (a_iGReg)))
1545# define IEM_MC_POP_GREG_U64(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU64(pVCpu, (a_iGReg)))
1546#else
1547# define IEM_MC_PUSH_U16(a_u16Value) iemMemStackPushU16Jmp(pVCpu, (a_u16Value))
1548# define IEM_MC_PUSH_U32(a_u32Value) iemMemStackPushU32Jmp(pVCpu, (a_u32Value))
1549# define IEM_MC_PUSH_U32_SREG(a_uSegVal) iemMemStackPushU32SRegJmp(pVCpu, (a_uSegVal))
1550# define IEM_MC_PUSH_U64(a_u64Value) iemMemStackPushU64Jmp(pVCpu, (a_u64Value))
1551
1552# define IEM_MC_POP_GREG_U16(a_iGReg) iemMemStackPopGRegU16Jmp(pVCpu, (a_iGReg))
1553# define IEM_MC_POP_GREG_U32(a_iGReg) iemMemStackPopGRegU32Jmp(pVCpu, (a_iGReg))
1554# define IEM_MC_POP_GREG_U64(a_iGReg) iemMemStackPopGRegU64Jmp(pVCpu, (a_iGReg))
1555#endif
1556
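/*
 * Illustrative sketch of the push/pop macros above for a push ax / pop cx
 * pair; broadly, the workers honour the current stack attributes (SS and CPU
 * mode) when picking the stack width.  Hypothetical local u16Value and the
 * usual IEM_MC_BEGIN/END framing are assumed:
 *
 *     IEM_MC_PUSH_U16(u16Value);
 *     ...
 *     IEM_MC_POP_GREG_U16(X86_GREG_xCX);
 */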
1557/* 32-bit flat stack push and pop: */
1558#ifndef IEM_WITH_SETJMP
1559# define IEM_MC_FLAT32_PUSH_U16(a_u16Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
1560# define IEM_MC_FLAT32_PUSH_U32(a_u32Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
1561# define IEM_MC_FLAT32_PUSH_U32_SREG(a_uSegVal) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_uSegVal)))
1562
1563# define IEM_MC_FLAT32_POP_GREG_U16(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU16(pVCpu, (a_iGReg)))
1564# define IEM_MC_FLAT32_POP_GREG_U32(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU32(pVCpu, (a_iGReg)))
1565#else
1566# define IEM_MC_FLAT32_PUSH_U16(a_u16Value) iemMemFlat32StackPushU16Jmp(pVCpu, (a_u16Value))
1567# define IEM_MC_FLAT32_PUSH_U32(a_u32Value) iemMemFlat32StackPushU32Jmp(pVCpu, (a_u32Value))
1568# define IEM_MC_FLAT32_PUSH_U32_SREG(a_uSegVal) iemMemFlat32StackPushU32SRegJmp(pVCpu, (a_uSegVal))
1569
1570# define IEM_MC_FLAT32_POP_GREG_U16(a_iGReg) iemMemFlat32StackPopGRegU16Jmp(pVCpu, (a_iGReg))
1571# define IEM_MC_FLAT32_POP_GREG_U32(a_iGReg) iemMemFlat32StackPopGRegU32Jmp(pVCpu, (a_iGReg))
1572#endif
1573
1574/* 64-bit flat stack push and pop: */
1575#ifndef IEM_WITH_SETJMP
1576# define IEM_MC_FLAT64_PUSH_U16(a_u16Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
1577# define IEM_MC_FLAT64_PUSH_U64(a_u64Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
1578
1579# define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU16(pVCpu, (a_iGReg)))
1580# define IEM_MC_FLAT64_POP_GREG_U64(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU64(pVCpu, (a_iGReg)))
1581#else
1582# define IEM_MC_FLAT64_PUSH_U16(a_u16Value) iemMemFlat64StackPushU16Jmp(pVCpu, (a_u16Value))
1583# define IEM_MC_FLAT64_PUSH_U64(a_u64Value) iemMemFlat64StackPushU64Jmp(pVCpu, (a_u64Value))
1584
1585# define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg) iemMemFlat64StackPopGRegU16Jmp(pVCpu, (a_iGReg))
1586# define IEM_MC_FLAT64_POP_GREG_U64(a_iGReg) iemMemFlat64StackPopGRegU64Jmp(pVCpu, (a_iGReg))
1587#endif
1588
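/*
 * Illustrative note: in the non-setjmp build the FLAT32/FLAT64 stack macros
 * above fall back to the same generic workers as the plain push/pop macros;
 * only the setjmp build has the dedicated iemMemFlat32StackPush... and
 * iemMemFlat64StackPush... helpers, presumably so code that already knows SS
 * is flat can skip the segmented-stack handling.  Hedged sketch for a 64-bit
 * mode push with a hypothetical local:
 *
 *     IEM_MC_FLAT64_PUSH_U64(u64Value);
 */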
1589
1590/* 8-bit */
1591
1592/**
1593 * Maps guest memory for byte atomic read+write direct (or bounce) buffer
1594 * access, for atomic operations.
1595 *
1596 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1597 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1598 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1599 * @param[in] a_GCPtrMem The memory address.
1600 * @remarks Will return/long jump on errors.
1601 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1602 */
1603#ifndef IEM_WITH_SETJMP
1604# define IEM_MC_MEM_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1605 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \
1606 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, 0))
1607#else
1608# define IEM_MC_MEM_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1609 (a_pu8Mem) = iemMemMapDataU8AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1610#endif
1611
1612/**
1613 * Maps guest memory for byte read+write direct (or bounce) buffer access.
1614 *
1615 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1616 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1617 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1618 * @param[in] a_GCPtrMem The memory address.
1619 * @remarks Will return/long jump on errors.
1620 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1621 */
1622#ifndef IEM_WITH_SETJMP
1623# define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1624 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \
1625 (a_GCPtrMem), IEM_ACCESS_DATA_RW, 0))
1626#else
1627# define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1628 (a_pu8Mem) = iemMemMapDataU8RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1629#endif
1630
1631/**
1632 * Maps guest memory for byte writeonly direct (or bounce) buffer access.
1633 *
1634 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1635 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1636 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1637 * @param[in] a_GCPtrMem The memory address.
1638 * @remarks Will return/long jump on errors.
1639 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1640 */
1641#ifndef IEM_WITH_SETJMP
1642# define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1643 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \
1644 (a_GCPtrMem), IEM_ACCESS_DATA_W, 0))
1645#else
1646# define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1647 (a_pu8Mem) = iemMemMapDataU8WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1648#endif
1649
1650/**
1651 * Maps guest memory for byte readonly direct (or bounce) buffer access.
1652 *
1653 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1654 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1655 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1656 * @param[in] a_GCPtrMem The memory address.
1657 * @remarks Will return/long jump on errors.
1658 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1659 */
1660#ifndef IEM_WITH_SETJMP
1661# define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1662 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \
1663 (a_GCPtrMem), IEM_ACCESS_DATA_R, 0))
1664#else
1665# define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1666 (a_pu8Mem) = iemMemMapDataU8RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1667#endif
1668
1669/**
1670 * Maps guest memory for byte atomic read+write direct (or bounce) buffer
1671 * access, flat address variant.
1672 *
1673 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1674 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1675 * @param[in] a_GCPtrMem The memory address.
1676 * @remarks Will return/long jump on errors.
1677 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1678 */
1679#ifndef IEM_WITH_SETJMP
1680# define IEM_MC_MEM_FLAT_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1681 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \
1682 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, 0))
1683#else
1684# define IEM_MC_MEM_FLAT_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1685 (a_pu8Mem) = iemMemFlatMapDataU8AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1686#endif
1687
1688/**
1689 * Maps guest memory for byte read+write direct (or bounce) buffer access, flat
1690 * address variant.
1691 *
1692 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1693 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1694 * @param[in] a_GCPtrMem The memory address.
1695 * @remarks Will return/long jump on errors.
1696 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1697 */
1698#ifndef IEM_WITH_SETJMP
1699# define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1700 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \
1701 (a_GCPtrMem), IEM_ACCESS_DATA_RW, 0))
1702#else
1703# define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1704 (a_pu8Mem) = iemMemFlatMapDataU8RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1705#endif
1706
1707/**
1708 * Maps guest memory for byte writeonly direct (or bounce) buffer access, flat
1709 * address variant.
1710 *
1711 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1712 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1713 * @param[in] a_GCPtrMem The memory address.
1714 * @remarks Will return/long jump on errors.
1715 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1716 */
1717#ifndef IEM_WITH_SETJMP
1718# define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1719 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \
1720 (a_GCPtrMem), IEM_ACCESS_DATA_W, 0))
1721#else
1722# define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1723 (a_pu8Mem) = iemMemFlatMapDataU8WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1724#endif
1725
1726/**
1727 * Maps guest memory for byte readonly direct (or bounce) buffer access, flat
1728 * address variant.
1729 *
1730 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1731 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1732 * @param[in] a_GCPtrMem The memory address.
1733 * @remarks Will return/long jump on errors.
1734 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1735 */
1736#ifndef IEM_WITH_SETJMP
1737# define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1738 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \
1739 (a_GCPtrMem), IEM_ACCESS_DATA_R, 0))
1740#else
1741# define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1742 (a_pu8Mem) = iemMemFlatMapDataU8RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1743#endif
1744
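/*
 * Illustrative read-modify-write pattern using the byte mapping macros above
 * (hypothetical locals and segment/address names; in real decoder bodies the
 * modification typically goes through an IEM_MC_CALL_* worker rather than a
 * direct dereference, and the commit step is the
 * IEM_MC_MEM_COMMIT_AND_UNMAP_RW referenced in the @see tags):
 *
 *     uint8_t *pu8Dst;
 *     uint8_t  bUnmapInfo;
 *     IEM_MC_MEM_MAP_U8_RW(pu8Dst, bUnmapInfo, iEffSeg, GCPtrEffDst);
 *     *pu8Dst += 1;
 *     IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo);
 */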
1745
1746/* 16-bit */
1747
1748/**
1749 * Maps guest memory for word atomic read+write direct (or bounce) buffer access.
1750 *
1751 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1752 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1753 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1754 * @param[in] a_GCPtrMem The memory address.
1755 * @remarks Will return/long jump on errors.
1756 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1757 */
1758#ifndef IEM_WITH_SETJMP
1759# define IEM_MC_MEM_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1760 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \
1761 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint16_t) - 1))
1762#else
1763# define IEM_MC_MEM_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1764 (a_pu16Mem) = iemMemMapDataU16AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1765#endif
1766
1767/**
1768 * Maps guest memory for word read+write direct (or bounce) buffer access.
1769 *
1770 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1771 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1772 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1773 * @param[in] a_GCPtrMem The memory address.
1774 * @remarks Will return/long jump on errors.
1775 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1776 */
1777#ifndef IEM_WITH_SETJMP
1778# define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1779 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \
1780 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1))
1781#else
1782# define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1783 (a_pu16Mem) = iemMemMapDataU16RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1784#endif
1785
1786/**
1787 * Maps guest memory for word writeonly direct (or bounce) buffer access.
1788 *
1789 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1790 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1791 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1792 * @param[in] a_GCPtrMem The memory address.
1793 * @remarks Will return/long jump on errors.
1794 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1795 */
1796#ifndef IEM_WITH_SETJMP
1797# define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1798 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \
1799 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1))
1800#else
1801# define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1802 (a_pu16Mem) = iemMemMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1803#endif
1804
1805/**
1806 * Maps guest memory for word readonly direct (or bounce) buffer access.
1807 *
1808 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1809 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1810 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1811 * @param[in] a_GCPtrMem The memory address.
1812 * @remarks Will return/long jump on errors.
1813 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1814 */
1815#ifndef IEM_WITH_SETJMP
1816# define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1817 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \
1818 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1))
1819#else
1820# define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1821 (a_pu16Mem) = iemMemMapDataU16RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1822#endif
1823
1824/**
1825 * Maps guest memory for word atomic read+write direct (or bounce) buffer
1826 * access, flat address variant.
1827 *
1828 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1829 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1830 * @param[in] a_GCPtrMem The memory address.
1831 * @remarks Will return/long jump on errors.
1832 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1833 */
1834#ifndef IEM_WITH_SETJMP
1835# define IEM_MC_MEM_FLAT_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1836 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \
1837 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint16_t) - 1))
1838#else
1839# define IEM_MC_MEM_FLAT_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1840 (a_pu16Mem) = iemMemFlatMapDataU16AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1841#endif
1842
1843/**
1844 * Maps guest memory for word read+write direct (or bounce) buffer access, flat
1845 * address variant.
1846 *
1847 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1848 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1849 * @param[in] a_GCPtrMem The memory address.
1850 * @remarks Will return/long jump on errors.
1851 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1852 */
1853#ifndef IEM_WITH_SETJMP
1854# define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1855 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \
1856 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1))
1857#else
1858# define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1859 (a_pu16Mem) = iemMemFlatMapDataU16RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1860#endif
1861
1862/**
1863 * Maps guest memory for word writeonly direct (or bounce) buffer access, flat
1864 * address variant.
1865 *
1866 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1867 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1868 * @param[in] a_GCPtrMem The memory address.
1869 * @remarks Will return/long jump on errors.
1870 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1871 */
1872#ifndef IEM_WITH_SETJMP
1873# define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1874 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \
1875 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1))
1876#else
1877# define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1878 (a_pu16Mem) = iemMemFlatMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1879#endif
1880
1881/**
1882 * Maps guest memory for word readonly direct (or bounce) buffer access, flat
1883 * address variant.
1884 *
1885 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1886 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1887 * @param[in] a_GCPtrMem The memory address.
1888 * @remarks Will return/long jump on errors.
1889 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1890 */
1891#ifndef IEM_WITH_SETJMP
1892# define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1893 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \
1894 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1))
1895#else
1896# define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1897 (a_pu16Mem) = iemMemFlatMapDataU16RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1898#endif
1899
1900/** int16_t alias. */
1901#ifndef IEM_WITH_SETJMP
1902# define IEM_MC_MEM_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1903 IEM_MC_MEM_MAP_U16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
1904#else
1905# define IEM_MC_MEM_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1906 (a_pi16Mem) = (int16_t *)iemMemMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1907#endif
1908
1909/** Flat int16_t alias. */
1910#ifndef IEM_WITH_SETJMP
1911# define IEM_MC_MEM_FLAT_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem) \
1912 IEM_MC_MEM_FLAT_MAP_U16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem)
1913#else
1914# define IEM_MC_MEM_FLAT_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem) \
1915 (a_pi16Mem) = (int16_t *)iemMemFlatMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1916#endif
1917
1918
1919/* 32-bit */
1920
1921/**
1922 * Maps guest memory for dword atomic read+write direct (or bounce) buffer access.
1923 *
1924 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1925 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1926 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1927 * @param[in] a_GCPtrMem The memory address.
1928 * @remarks Will return/long jump on errors.
1929 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1930 */
1931#ifndef IEM_WITH_SETJMP
1932# define IEM_MC_MEM_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1933 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \
1934 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint32_t) - 1))
1935#else
1936# define IEM_MC_MEM_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1937 (a_pu32Mem) = iemMemMapDataU32AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1938#endif
1939
1940/**
1941 * Maps guest memory for dword read+write direct (or bounce) buffer access.
1942 *
1943 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1944 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1945 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1946 * @param[in] a_GCPtrMem The memory address.
1947 * @remarks Will return/long jump on errors.
1948 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1949 */
1950#ifndef IEM_WITH_SETJMP
1951# define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1952 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \
1953 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1))
1954#else
1955# define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1956 (a_pu32Mem) = iemMemMapDataU32RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1957#endif
1958
1959/**
1960 * Maps guest memory for dword writeonly direct (or bounce) buffer access.
1961 *
1962 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1963 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1964 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1965 * @param[in] a_GCPtrMem The memory address.
1966 * @remarks Will return/long jump on errors.
1967 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1968 */
1969#ifndef IEM_WITH_SETJMP
1970# define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1971 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \
1972 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1))
1973#else
1974# define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1975 (a_pu32Mem) = iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1976#endif
1977
1978/**
1979 * Maps guest memory for dword readonly direct (or bounce) buffer access.
1980 *
1981 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1982 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1983 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1984 * @param[in] a_GCPtrMem The memory address.
1985 * @remarks Will return/long jump on errors.
1986 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1987 */
1988#ifndef IEM_WITH_SETJMP
1989# define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1990 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \
1991 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1))
1992#else
1993# define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1994 (a_pu32Mem) = iemMemMapDataU32RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1995#endif
1996
1997/**
1998 * Maps guest memory for dword atomic read+write direct (or bounce) buffer
1999 * access, flat address variant.
2000 *
2001 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
2002 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2003 * @param[in] a_GCPtrMem The memory address.
2004 * @remarks Will return/long jump on errors.
2005 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
2006 */
2007#ifndef IEM_WITH_SETJMP
2008# define IEM_MC_MEM_FLAT_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
2009 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \
2010 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint32_t) - 1))
2011#else
2012# define IEM_MC_MEM_FLAT_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
2013 (a_pu32Mem) = iemMemFlatMapDataU32AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2014#endif
2015
2016/**
2017 * Maps guest memory for dword read+write direct (or bounce) buffer access,
2018 * flat address variant.
2019 *
2020 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
2021 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2022 * @param[in] a_GCPtrMem The memory address.
2023 * @remarks Will return/long jump on errors.
2024 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
2025 */
2026#ifndef IEM_WITH_SETJMP
2027# define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
2028 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \
2029 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1))
2030#else
2031# define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
2032 (a_pu32Mem) = iemMemFlatMapDataU32RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2033#endif
2034
2035/**
2036 * Maps guest memory for dword writeonly direct (or bounce) buffer access, flat
2037 * address variant.
2038 *
2039 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
2040 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2041 * @param[in] a_GCPtrMem The memory address.
2042 * @remarks Will return/long jump on errors.
2043 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2044 */
2045#ifndef IEM_WITH_SETJMP
2046# define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
2047 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \
2048 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1))
2049#else
2050# define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
2051 (a_pu32Mem) = iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2052#endif
2053
2054/**
2055 * Maps guest memory for dword readonly direct (or bounce) buffer access, flat
2056 * address variant.
2057 *
2058 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
2059 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2060 * @param[in] a_GCPtrMem The memory address.
2061 * @remarks Will return/long jump on errors.
2062 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
2063 */
2064#ifndef IEM_WITH_SETJMP
2065# define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
2066 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \
2067 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1))
2068#else
2069# define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
2070 (a_pu32Mem) = iemMemFlatMapDataU32RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2071#endif
2072
2073/** int32_t alias. */
2074#ifndef IEM_WITH_SETJMP
2075# define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2076 IEM_MC_MEM_MAP_U32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
2077#else
2078# define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2079 (a_pi32Mem) = (int32_t *)iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2080#endif
2081
2082/** Flat int32_t alias. */
2083#ifndef IEM_WITH_SETJMP
2084# define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \
2085 IEM_MC_MEM_FLAT_MAP_U32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem)
2086#else
2087# define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \
2088 (a_pi32Mem) = (int32_t *)iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2089#endif
2090
2091/** RTFLOAT32U alias. */
2092#ifndef IEM_WITH_SETJMP
2093# define IEM_MC_MEM_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2094 IEM_MC_MEM_MAP_U32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
2095#else
2096# define IEM_MC_MEM_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2097 (a_pr32Mem) = (PRTFLOAT32U)iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2098#endif
2099
2100/** Flat RTFLOAT32U alias. */
2101#ifndef IEM_WITH_SETJMP
2102# define IEM_MC_MEM_FLAT_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) \
2103 IEM_MC_MEM_FLAT_MAP_U32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem)
2104#else
2105# define IEM_MC_MEM_FLAT_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) \
2106 (a_pr32Mem) = (PRTFLOAT32U)iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2107#endif
2108
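/*
 * Usage note (illustrative only, not lifted from the instruction tables): the
 * segmented map MCs above take an explicit segment register, while the FLAT
 * variants are for 64-bit/flat addressing and pass UINT8_MAX as the segment
 * internally.  A decoder body typically picks one or the other, e.g. (the
 * variable declaration MCs are assumed from earlier in this file):
 *
 *      IEM_MC_MEM_MAP_U32_RW(     pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
 *      IEM_MC_MEM_FLAT_MAP_U32_RW(pu32Dst, bUnmapInfo,                       GCPtrEffDst);
 */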
2109
2110/* 64-bit */
2111
2112/**
2113 * Maps guest memory for qword atomic read+write direct (or bounce) buffer access.
2114 *
2115 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2116 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2117 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2118 * @param[in] a_GCPtrMem The memory address.
2119 * @remarks Will return/long jump on errors.
2120 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
2121 */
2122#ifndef IEM_WITH_SETJMP
2123# define IEM_MC_MEM_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2124 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \
2125 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint64_t) - 1))
2126#else
2127# define IEM_MC_MEM_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2128 (a_pu64Mem) = iemMemMapDataU64AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2129#endif
2130
2131/**
2132 * Maps guest memory for qword read+write direct (or bounce) buffer access.
2133 *
2134 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2135 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2136 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2137 * @param[in] a_GCPtrMem The memory address.
2138 * @remarks Will return/long jump on errors.
2139 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
2140 */
2141#ifndef IEM_WITH_SETJMP
2142# define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2143 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \
2144 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1))
2145#else
2146# define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2147 (a_pu64Mem) = iemMemMapDataU64RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2148#endif
2149
2150/**
2151 * Maps guest memory for qword writeonly direct (or bounce) buffer access.
2152 *
2153 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2154 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2155 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2156 * @param[in] a_GCPtrMem The memory address.
2157 * @remarks Will return/long jump on errors.
2158 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2159 */
2160#ifndef IEM_WITH_SETJMP
2161# define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2162 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \
2163 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
2164#else
2165# define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2166 (a_pu64Mem) = iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2167#endif
2168
2169/**
2170 * Maps guest memory for qword readonly direct (or bounce) buffer access.
2171 *
2172 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2173 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2174 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2175 * @param[in] a_GCPtrMem The memory address.
2176 * @remarks Will return/long jump on errors.
2177 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
2178 */
2179#ifndef IEM_WITH_SETJMP
2180# define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2181 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \
2182 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1))
2183#else
2184# define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2185 (a_pu64Mem) = iemMemMapDataU64RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2186#endif
2187
2188/**
2189 * Maps guest memory for qword atomic read+write direct (or bounce) buffer
2190 * access, flat address variant.
2191 *
2192 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2193 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2194 * @param[in] a_GCPtrMem The memory address.
2195 * @remarks Will return/long jump on errors.
2196 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
2197 */
2198#ifndef IEM_WITH_SETJMP
2199# define IEM_MC_MEM_FLAT_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2200 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \
2201 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint64_t) - 1))
2202#else
2203# define IEM_MC_MEM_FLAT_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2204 (a_pu64Mem) = iemMemFlatMapDataU64AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2205#endif
2206
2207/**
2208 * Maps guest memory for qword read+write direct (or bounce) buffer access,
2209 * flat address variant.
2210 *
2211 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2212 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2213 * @param[in] a_GCPtrMem The memory address.
2214 * @remarks Will return/long jump on errors.
2215 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
2216 */
2217#ifndef IEM_WITH_SETJMP
2218# define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2219 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \
2220 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1))
2221#else
2222# define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2223 (a_pu64Mem) = iemMemFlatMapDataU64RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2224#endif
2225
2226/**
2227 * Maps guest memory for qword writeonly direct (or bounce) buffer access, flat
2228 * address variant.
2229 *
2230 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2231 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2232 * @param[in] a_GCPtrMem The memory address.
2233 * @remarks Will return/long jump on errors.
2234 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2235 */
2236#ifndef IEM_WITH_SETJMP
2237# define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2238 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \
2239 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
2240#else
2241# define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2242 (a_pu64Mem) = iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2243#endif
2244
2245/**
2246 * Maps guest memory for qword readonly direct (or bounce) buffer access, flat
2247 * address variant.
2248 *
2249 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2250 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2251 * @param[in] a_GCPtrMem The memory address.
2252 * @remarks Will return/long jump on errors.
2253 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
2254 */
2255#ifndef IEM_WITH_SETJMP
2256# define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2257 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \
2258 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1))
2259#else
2260# define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2261 (a_pu64Mem) = iemMemFlatMapDataU64RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2262#endif
2263
2264/** int64_t alias. */
2265#ifndef IEM_WITH_SETJMP
2266# define IEM_MC_MEM_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2267 IEM_MC_MEM_MAP_U64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
2268#else
2269# define IEM_MC_MEM_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2270 (a_pi64Mem) = (int64_t *)iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2271#endif
2272
2273/** Flat int64_t alias. */
2274#ifndef IEM_WITH_SETJMP
2275# define IEM_MC_MEM_FLAT_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem) \
2276 IEM_MC_MEM_FLAT_MAP_U64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem)
2277#else
2278# define IEM_MC_MEM_FLAT_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem) \
2279 (a_pi64Mem) = (int64_t *)iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2280#endif
2281
2282/** RTFLOAT64U alias. */
2283#ifndef IEM_WITH_SETJMP
2284# define IEM_MC_MEM_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2285 IEM_MC_MEM_MAP_U64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
2286#else
2287# define IEM_MC_MEM_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2288 (a_pr64Mem) = (PRTFLOAT64U)iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2289#endif
2290
2291/** Flat RTFLOAT64U alias. */
2292#ifndef IEM_WITH_SETJMP
2293# define IEM_MC_MEM_FLAT_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_GCPtrMem) \
2294 IEM_MC_MEM_FLAT_MAP_U64_WO(a_pr64Mem, a_bUnmapInfo, a_GCPtrMem)
2295#else
2296# define IEM_MC_MEM_FLAT_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_GCPtrMem) \
2297 (a_pr64Mem) = (PRTFLOAT64U)iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2298#endif
2299
2300
2301/* 128-bit */
2302
2303/**
2304 * Maps guest memory for dqword atomic read+write direct (or bounce) buffer access.
2305 *
2306 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2307 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2308 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2309 * @param[in] a_GCPtrMem The memory address.
2310 * @remarks Will return/long jump on errors.
2311 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
2312 */
2313#ifndef IEM_WITH_SETJMP
2314# define IEM_MC_MEM_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2315 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), (a_iSeg), \
2316 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(RTUINT128U) - 1))
2317#else
2318# define IEM_MC_MEM_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2319 (a_pu128Mem) = iemMemMapDataU128AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2320#endif
2321
2322/**
2323 * Maps guest memory for dqword read+write direct (or bounce) buffer access.
2324 *
2325 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2326 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2327 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2328 * @param[in] a_GCPtrMem The memory address.
2329 * @remarks Will return/long jump on errors.
2330 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
2331 */
2332#ifndef IEM_WITH_SETJMP
2333# define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2334 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), (a_iSeg), \
2335 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(RTUINT128U) - 1))
2336#else
2337# define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2338 (a_pu128Mem) = iemMemMapDataU128RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2339#endif
2340
2341/**
2342 * Maps guest memory for dqword writeonly direct (or bounce) buffer access.
2343 *
2344 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2345 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2346 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2347 * @param[in] a_GCPtrMem The memory address.
2348 * @remarks Will return/long jump on errors.
2349 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2350 */
2351#ifndef IEM_WITH_SETJMP
2352# define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2353 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), (a_iSeg), \
2354 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(RTUINT128U) - 1))
2355#else
2356# define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2357 (a_pu128Mem) = iemMemMapDataU128WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2358#endif
2359
2360/**
2361 * Maps guest memory for dqword readonly direct (or bounce) buffer access.
2362 *
2363 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2364 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2365 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2366 * @param[in] a_GCPtrMem The memory address.
2367 * @remarks Will return/long jump on errors.
2368 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
2369 */
2370#ifndef IEM_WITH_SETJMP
2371# define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2372 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), (a_iSeg), \
2373 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(RTUINT128U) - 1))
2374#else
2375# define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2376 (a_pu128Mem) = iemMemMapDataU128RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2377#endif
2378
2379/**
2380 * Maps guest memory for dqword atomic read+write direct (or bounce) buffer
2381 * access, flat address variant.
2382 *
2383 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2384 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2385 * @param[in] a_GCPtrMem The memory address.
2386 * @remarks Will return/long jump on errors.
2387 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
2388 */
2389#ifndef IEM_WITH_SETJMP
2390# define IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2391 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), UINT8_MAX, \
2392 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(RTUINT128U) - 1))
2393#else
2394# define IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2395 (a_pu128Mem) = iemMemFlatMapDataU128AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2396#endif
2397
2398/**
2399 * Maps guest memory for dqword read+write direct (or bounce) buffer access,
2400 * flat address variant.
2401 *
2402 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2403 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2404 * @param[in] a_GCPtrMem The memory address.
2405 * @remarks Will return/long jump on errors.
2406 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
2407 */
2408#ifndef IEM_WITH_SETJMP
2409# define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2410 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), UINT8_MAX, \
2411 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(RTUINT128U) - 1))
2412#else
2413# define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2414 (a_pu128Mem) = iemMemFlatMapDataU128RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2415#endif
2416
2417/**
2418 * Maps guest memory for dqword writeonly direct (or bounce) buffer access,
2419 * flat address variant.
2420 *
2421 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2422 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2423 * @param[in] a_GCPtrMem The memory address.
2424 * @remarks Will return/long jump on errors.
2425 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2426 */
2427#ifndef IEM_WITH_SETJMP
2428# define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2429 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), UINT8_MAX, \
2430 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(RTUINT128U) - 1))
2431#else
2432# define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2433 (a_pu128Mem) = iemMemFlatMapDataU128WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2434#endif
2435
2436/**
2437 * Maps guest memory for dqword readonly direct (or bounce) buffer access, flat
2438 * address variant.
2439 *
2440 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2441 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2442 * @param[in] a_GCPtrMem The memory address.
2443 * @remarks Will return/long jump on errors.
2444 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
2445 */
2446#ifndef IEM_WITH_SETJMP
2447# define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2448 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), UINT8_MAX, \
2449 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(RTUINT128U) - 1))
2450#else
2451# define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2452 (a_pu128Mem) = iemMemFlatMapDataU128RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2453#endif
2454
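/*
 * Usage note (illustrative only): the ATOMIC dqword variants above are for
 * 16-byte accesses that must be carried out as a single atomic read-modify-write
 * (the kind of access a locked CMPXCHG16B performs) and pair with
 * IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC further down, while the RW/WO/RO variants
 * pair with the corresponding plain commit/unmap MCs.  Rough sketch with a
 * placeholder worker name and assumed variable declarations:
 *
 *      IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(pu128MemDst, bUnmapInfo, GCPtrEffDst);
 *      IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_xxx_u128_locked, pu128MemDst);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo);
 */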
2455
2456/* misc */
2457
2458/**
2459 * Maps guest memory for 80-bit float writeonly direct (or bounce) buffer access.
2460 *
2461 * @param[out] a_pr80Mem Where to return the pointer to the mapping.
2462 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2463 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2464 * @param[in] a_GCPtrMem The memory address.
2465 * @remarks Will return/long jump on errors.
2466 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2467 */
2468#ifndef IEM_WITH_SETJMP
2469# define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2470 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), (a_iSeg), \
2471 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
2472#else
2473# define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2474 (a_pr80Mem) = iemMemMapDataR80WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2475#endif
2476
2477/**
2478 * Maps guest memory for 80-bit float writeonly direct (or bounce) buffer access.
2479 *
2480 * @param[out] a_pr80Mem Where to return the pointer to the mapping.
2481 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2482 * @param[in] a_GCPtrMem The memory address.
2483 * @remarks Will return/long jump on errors.
2484 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2485 */
2486#ifndef IEM_WITH_SETJMP
2487# define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \
2488 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), UINT8_MAX, \
2489 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
2490#else
2491# define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \
2492 (a_pr80Mem) = iemMemFlatMapDataR80WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2493#endif
2494
2495
2496/**
2497 * Maps guest memory for 80-bit BCD writeonly direct (or bounce) buffer access.
2498 *
2499 * @param[out] a_pd80Mem Where to return the pointer to the mapping.
2500 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2501 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2502 * @param[in] a_GCPtrMem The memory address.
2503 * @remarks Will return/long jump on errors.
2504 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2505 */
2506#ifndef IEM_WITH_SETJMP
2507# define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2508 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pd80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), (a_iSeg), \
2509 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
2510#else
2511# define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2512 (a_pd80Mem) = iemMemMapDataD80WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2513#endif
2514
2515/**
2516 * Maps guest memory for 80-bit BCD writeonly direct (or bounce) buffer access.
2517 *
2518 * @param[out] a_pd80Mem Where to return the pointer to the mapping.
2519 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2520 * @param[in] a_GCPtrMem The memory address.
2521 * @remarks Will return/long jump on errors.
2522 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2523 */
2524#ifndef IEM_WITH_SETJMP
2525# define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \
2526 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pd80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), UINT8_MAX, \
2527 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
2528#else
2529# define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \
2530 (a_pd80Mem) = iemMemFlatMapDataD80WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2531#endif
2532
2533
2534
2535/* commit + unmap */
2536
2537/** Commits the memory and unmaps guest memory previously mapped RW.
2538 * @remarks May return.
2539 * @note Implicitly frees the a_bMapInfo variable.
2540 */
2541#ifndef IEM_WITH_SETJMP
2542# define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_bMapInfo) IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo))
2543#else
2544# define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_bMapInfo) iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo))
2545#endif
2546
2547/** Commits the memory and unmaps guest memory previously mapped ATOMIC.
2548 * @remarks May return.
2549 * @note Implicitly frees the a_bMapInfo variable.
2550 */
2551#ifndef IEM_WITH_SETJMP
2552# define IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(a_bMapInfo) IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo))
2553#else
2554# define IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(a_bMapInfo) iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo))
2555#endif
2556
2557/** Commits the memory and unmaps guest memory previously mapped W.
2558 * @remarks May return.
2559 * @note Implicitly frees the a_bMapInfo variable.
2560 */
2561#ifndef IEM_WITH_SETJMP
2562# define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_bMapInfo) IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo))
2563#else
2564# define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_bMapInfo) iemMemCommitAndUnmapWoJmp(pVCpu, (a_bMapInfo))
2565#endif
2566
2567/** Commits the memory and unmaps guest memory previously mapped R.
2568 * @remarks May return.
2569 * @note Implicitly frees the a_bMapInfo variable.
2570 */
2571#ifndef IEM_WITH_SETJMP
2572# define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_bMapInfo) IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo))
2573#else
2574# define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_bMapInfo) iemMemCommitAndUnmapRoJmp(pVCpu, (a_bMapInfo))
2575#endif
2576
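/*
 * Usage note (a minimal illustrative sketch, not taken from the instruction
 * tables): a map MC is always paired with the commit/unmap MC above of the
 * same access type.  The IEM_MC_BEGIN flags, the IEM_MC_LOCAL/IEM_MC_ARG
 * declarations and the iemAImpl_xxx_u32 worker name below are placeholders:
 *
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_ARG(uint32_t *,  pu32Dst,    0);
 *      IEM_MC_LOCAL(uint8_t,   bUnmapInfo);
 *      IEM_MC_LOCAL(RTGCPTR,   GCPtrEffDst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_MEM_MAP_U32_RW(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
 *      IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_xxx_u32, pu32Dst);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo);
 *      IEM_MC_ADVANCE_RIP_AND_FINISH();
 *      IEM_MC_END();
 */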
2577
2578/** Commits the memory and unmaps the guest memory unless the FPU status word
2579 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
2580 * would cause FLD not to store.
2581 *
2582 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
2583 * store, while \#P will not.
2584 *
2585 * @remarks May in theory return - for now.
2586 * @note Implicitly frees both the a_bMapInfo and a_u16FSW variables.
2587 */
2588#ifndef IEM_WITH_SETJMP
2589# define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_bMapInfo, a_u16FSW) do { \
2590 if ( !(a_u16FSW & X86_FSW_ES) \
2591 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
2592 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
2593 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo)); \
2594 else \
2595 iemMemRollbackAndUnmap(pVCpu, a_bMapInfo); \
2596 } while (0)
2597#else
2598# define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_bMapInfo, a_u16FSW) do { \
2599 if ( !(a_u16FSW & X86_FSW_ES) \
2600 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
2601 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
2602 iemMemCommitAndUnmapWoJmp(pVCpu, a_bMapInfo); \
2603 else \
2604 iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo); \
2605 } while (0)
2606#endif
2607
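/*
 * Usage note (very rough sketch): IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO
 * is meant for FPU store instructions: the destination is mapped write-only,
 * the assembly worker produces the value and an FSW, and the commit is then
 * skipped (rolled back) when that FSW signals a pending unmasked exception.
 * The worker name and the surrounding variable/stack-register-test MCs are
 * assumptions:
 *
 *      IEM_MC_MEM_MAP_R80_WO(pr80Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_xxx_r80, pu16Fsw, pr80Dst, pr80Value);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(bUnmapInfo, u16Fsw);
 *      IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffDst, uFpuOpcode);
 */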
2608/** Rolls back (conceptually only, assumes no writes) and unmaps the guest memory.
2609 * @note Implicitly frees the a_bMapInfo variable. */
2610#ifndef IEM_WITH_SETJMP
2611# define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_bMapInfo) iemMemRollbackAndUnmap(pVCpu, a_bMapInfo)
2612#else
2613# define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_bMapInfo) iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo)
2614#endif
2615
2616
2617
2618/** Calculate efficient address from R/M. */
2619#ifndef IEM_WITH_SETJMP
2620# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
2621 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &(a_GCPtrEff)))
2622#else
2623# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
2624 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (a_bRm), (a_cbImmAndRspOffset)))
2625#endif
2626
2627
2628/** The @a a_fSupportedHosts mask is made up of RT_ARCH_VAL_XXX values ORed together. */
2629#define IEM_MC_NATIVE_IF(a_fSupportedHosts) if (false) {
2630#define IEM_MC_NATIVE_ELSE() } else {
2631#define IEM_MC_NATIVE_ENDIF() } ((void)0)
2632
2633#define IEM_MC_NATIVE_EMIT_0(a_fnEmitter)
2634#define IEM_MC_NATIVE_EMIT_1(a_fnEmitter, a0) (void)(a0)
2635#define IEM_MC_NATIVE_EMIT_2(a_fnEmitter, a0, a1) (void)(a0), (void)(a1)
2636#define IEM_MC_NATIVE_EMIT_3(a_fnEmitter, a0, a1, a2) (void)(a0), (void)(a1), (void)(a2)
2637#define IEM_MC_NATIVE_EMIT_4(a_fnEmitter, a0, a1, a2, a3) (void)(a0), (void)(a1), (void)(a2), (void)(a3)
2638#define IEM_MC_NATIVE_EMIT_5(a_fnEmitter, a0, a1, a2, a3, a4) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4)
2639#define IEM_MC_NATIVE_EMIT_6(a_fnEmitter, a0, a1, a2, a3, a4, a5) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5)
2640#define IEM_MC_NATIVE_EMIT_7(a_fnEmitter, a0, a1, a2, a3, a4, a5, a6) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5), (void)(a6)
2641#define IEM_MC_NATIVE_EMIT_8(a_fnEmitter, a0, a1, a2, a3, a4, a5, a6, a7) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5), (void)(a6), (void)(a7)
2642
2643/** This can be used to direct the register allocator when dealing with
2644 * x86/AMD64 instructions (like SHL reg,CL) that take fixed registers. */
2645#define IEM_MC_NATIVE_SET_AMD64_HOST_REG_FOR_LOCAL(a_VarNm, a_idxHostReg) ((void)0)
2646
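/*
 * Usage note (illustrative only; the emitter and fallback worker names are
 * placeholders): the interpreter and threaded recompiler always take the ELSE
 * branch (the IF expands to 'if (false)'), while the native recompiler emits
 * the named emitter for the listed host architectures:
 *
 *      IEM_MC_NATIVE_IF(RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64)
 *          IEM_MC_NATIVE_EMIT_2(iemNativeEmit_xxx, uDst, uSrc);
 *      IEM_MC_NATIVE_ELSE()
 *          IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xxx_u32, pu32Dst, u32Src);
 *      IEM_MC_NATIVE_ENDIF();
 */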
2647
2648#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
2649#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
2650#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
2651#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
2652#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
2653#define IEM_MC_CALL_AIMPL_3(a_rcType, a_rc, a_pfn, a0, a1, a2) a_rcType const a_rc = (a_pfn)((a0), (a1), (a2))
2654#define IEM_MC_CALL_AIMPL_4(a_rcType, a_rc, a_pfn, a0, a1, a2, a3) a_rcType const a_rc = (a_pfn)((a0), (a1), (a2), (a3))
2655
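/*
 * Usage note: IEM_MC_CALL_AIMPL_3/4 declare and assign the return value in one
 * go, so the status/result local needs no separate declaration, e.g.
 * (placeholder worker name; the other variables are assumed to be MC args):
 *
 *      IEM_MC_CALL_AIMPL_3(int32_t, rc, iemAImpl_xxx_u64, pu64Dst, u64Src, pEFlags);
 */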
2656
2657/** @def IEM_MC_CALL_CIMPL_HLP_RET
2658 * Helper macro for checking that all important IEM_CIMPL_F_XXX bits are set.
2659 */
2660#ifdef VBOX_STRICT
2661#define IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, a_CallExpr) \
2662 do { \
2663 uint8_t const cbInstr = IEM_GET_INSTR_LEN(pVCpu); /* may be flushed */ \
2664 uint16_t const uCsBefore = pVCpu->cpum.GstCtx.cs.Sel; \
2665 uint64_t const uRipBefore = pVCpu->cpum.GstCtx.rip; \
2666 uint32_t const fEflBefore = pVCpu->cpum.GstCtx.eflags.u; \
2667 uint32_t const fExecBefore = pVCpu->iem.s.fExec; \
2668 VBOXSTRICTRC const rcStrictHlp = a_CallExpr; \
2669 if (rcStrictHlp == VINF_SUCCESS) \
2670 { \
2671 AssertMsg( ((a_fFlags) & IEM_CIMPL_F_BRANCH_ANY) \
2672 || ( uRipBefore + cbInstr == pVCpu->cpum.GstCtx.rip \
2673 && uCsBefore == pVCpu->cpum.GstCtx.cs.Sel) \
2674 || ( ((a_fFlags) & IEM_CIMPL_F_REP) \
2675 && uRipBefore == pVCpu->cpum.GstCtx.rip \
2676 && uCsBefore == pVCpu->cpum.GstCtx.cs.Sel), \
2677 ("CS:RIP=%04x:%08RX64 + %x -> %04x:%08RX64, expected %04x:%08RX64\n", uCsBefore, uRipBefore, cbInstr, \
2678 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uCsBefore, uRipBefore + cbInstr)); \
2679 if ((a_fFlags) & IEM_CIMPL_F_RFLAGS) \
2680 { /* No need to check fEflBefore */ Assert(!((a_fFlags) & IEM_CIMPL_F_STATUS_FLAGS)); } \
2681 else if ((a_fFlags) & IEM_CIMPL_F_STATUS_FLAGS) \
2682 AssertMsg( (pVCpu->cpum.GstCtx.eflags.u & ~(X86_EFL_STATUS_BITS | X86_EFL_RF)) \
2683 == (fEflBefore & ~(X86_EFL_STATUS_BITS | X86_EFL_RF)), \
2684 ("EFL=%#RX32 -> %#RX32\n", fEflBefore, pVCpu->cpum.GstCtx.eflags.u)); \
2685 else \
2686 AssertMsg( (pVCpu->cpum.GstCtx.eflags.u & ~(X86_EFL_RF)) \
2687 == (fEflBefore & ~(X86_EFL_RF)), \
2688 ("EFL=%#RX32 -> %#RX32\n", fEflBefore, pVCpu->cpum.GstCtx.eflags.u)); \
2689 if (!((a_fFlags) & IEM_CIMPL_F_MODE)) \
2690 { \
2691 uint32_t fExecRecalc = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS); \
2692 AssertMsg( fExecBefore == fExecRecalc \
2693 /* in case ES, DS or SS was external initially (happens a lot with HM): */ \
2694 || ( fExecBefore == (fExecRecalc & ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK) \
2695 && (fExecRecalc & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT), \
2696 ("fExec=%#x -> %#x (diff %#x)\n", fExecBefore, fExecRecalc, fExecBefore ^ fExecRecalc)); \
2697 } \
2698 } \
2699 return rcStrictHlp; \
2700 } while (0)
2701#else
2702# define IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, a_CallExpr) return a_CallExpr
2703#endif
2704
2705/**
2706 * Defers the rest of the instruction emulation to a C implementation routine
2707 * and returns, only taking the standard parameters.
2708 *
2709 * @param a_fFlags IEM_CIMPL_F_XXX.
2710 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2711 * in the native recompiler.
2712 * @param a_pfnCImpl The pointer to the C routine.
2713 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
2714 */
2715#define IEM_MC_CALL_CIMPL_0(a_fFlags, a_fGstShwFlush, a_pfnCImpl) \
2716 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu)))
2717
2718/**
2719 * Defers the rest of instruction emulation to a C implementation routine and
2720 * returns, taking one argument in addition to the standard ones.
2721 *
2722 * @param a_fFlags IEM_CIMPL_F_XXX.
2723 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2724 * in the native recompiler.
2725 * @param a_pfnCImpl The pointer to the C routine.
2726 * @param a0 The argument.
2727 */
2728#define IEM_MC_CALL_CIMPL_1(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
2729 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0))
2730
2731/**
2732 * Defers the rest of the instruction emulation to a C implementation routine
2733 * and returns, taking two arguments in addition to the standard ones.
2734 *
2735 * @param a_fFlags IEM_CIMPL_F_XXX.
2736 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2737 * in the native recompiler.
2738 * @param a_pfnCImpl The pointer to the C routine.
2739 * @param a0 The first extra argument.
2740 * @param a1 The second extra argument.
2741 */
2742#define IEM_MC_CALL_CIMPL_2(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
2743 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1))
2744
2745/**
2746 * Defers the rest of the instruction emulation to a C implementation routine
2747 * and returns, taking three arguments in addition to the standard ones.
2748 *
2749 * @param a_fFlags IEM_CIMPL_F_XXX.
2750 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2751 * in the native recompiler.
2752 * @param a_pfnCImpl The pointer to the C routine.
2753 * @param a0 The first extra argument.
2754 * @param a1 The second extra argument.
2755 * @param a2 The third extra argument.
2756 */
2757#define IEM_MC_CALL_CIMPL_3(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
2758 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2))
2759
2760/**
2761 * Defers the rest of the instruction emulation to a C implementation routine
2762 * and returns, taking four arguments in addition to the standard ones.
2763 *
2764 * @param a_fFlags IEM_CIMPL_F_XXX.
2765 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2766 * in the native recompiler.
2767 * @param a_pfnCImpl The pointer to the C routine.
2768 * @param a0 The first extra argument.
2769 * @param a1 The second extra argument.
2770 * @param a2 The third extra argument.
2771 * @param a3 The fourth extra argument.
2772 */
2773#define IEM_MC_CALL_CIMPL_4(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3) \
2774 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3))
2775
2776/**
2777 * Defers the rest of the instruction emulation to a C implementation routine
2778 * and returns, taking five arguments in addition to the standard ones.
2779 *
2780 * @param a_fFlags IEM_CIMPL_F_XXX.
2781 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2782 * in the native recompiler.
2783 * @param a_pfnCImpl The pointer to the C routine.
2784 * @param a0 The first extra argument.
2785 * @param a1 The second extra argument.
2786 * @param a2 The third extra argument.
2787 * @param a3 The fourth extra argument.
2788 * @param a4 The fifth extra argument.
2789 */
2790#define IEM_MC_CALL_CIMPL_5(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3, a4) \
2791 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4))
2792
2793/**
2794 * Defers the entire instruction emulation to a C implementation routine and
2795 * returns, only taking the standard parameters.
2796 *
2797 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
2798 *
2799 * @param a_fFlags IEM_CIMPL_F_XXX.
2800 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2801 * in the native recompiler.
2802 * @param a_pfnCImpl The pointer to the C routine.
2803 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
2804 */
2805#define IEM_MC_DEFER_TO_CIMPL_0_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl) \
2806 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu)))
2807
2808/**
2809 * Defers the entire instruction emulation to a C implementation routine and
2810 * returns, taking one argument in addition to the standard ones.
2811 *
2812 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
2813 *
2814 * @param a_fFlags IEM_CIMPL_F_XXX.
2815 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2816 * in the native recompiler.
2817 * @param a_pfnCImpl The pointer to the C routine.
2818 * @param a0 The argument.
2819 */
2820#define IEM_MC_DEFER_TO_CIMPL_1_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
2821 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0))
2822
2823/**
2824 * Defers the entire instruction emulation to a C implementation routine and
2825 * returns, taking two arguments in addition to the standard ones.
2826 *
2827 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
2828 *
2829 * @param a_fFlags IEM_CIMPL_F_XXX.
2830 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2831 * in the native recompiler.
2832 * @param a_pfnCImpl The pointer to the C routine.
2833 * @param a0 The first extra argument.
2834 * @param a1 The second extra argument.
2835 */
2836#define IEM_MC_DEFER_TO_CIMPL_2_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
2837 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1))
2838
2839/**
2840 * Defers the entire instruction emulation to a C implementation routine and
2841 * returns, taking three arguments in addition to the standard ones.
2842 *
2843 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
2844 *
2845 * @param a_fFlags IEM_CIMPL_F_XXX.
2846 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2847 * in the native recompiler.
2848 * @param a_pfnCImpl The pointer to the C routine.
2849 * @param a0 The first extra argument.
2850 * @param a1 The second extra argument.
2851 * @param a2 The third extra argument.
2852 */
2853#define IEM_MC_DEFER_TO_CIMPL_3_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
2854 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2))
2855
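/*
 * Usage note (illustrative; the C implementation names and flag choices are
 * placeholders): IEM_MC_CALL_CIMPL_N is used from within an
 * IEM_MC_BEGIN/IEM_MC_END block once the arguments have been set up, whereas
 * IEM_MC_DEFER_TO_CIMPL_N_RET stands on its own and replaces the whole block:
 *
 *      IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_STATUS_FLAGS, 0, iemCImpl_xxx, u16Arg);
 *
 *      IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_MODE, 0, iemCImpl_xxx);
 */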
2856
2857/**
2858 * Calls a FPU assembly implementation taking one visible argument.
2859 *
2860 * @param a_pfnAImpl Pointer to the assembly FPU routine.
2861 * @param a0 The first extra argument.
2862 */
2863#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
2864 do { \
2865 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0)); \
2866 } while (0)
2867
2868/**
2869 * Calls a FPU assembly implementation taking two visible arguments.
2870 *
2871 * @param a_pfnAImpl Pointer to the assembly FPU routine.
2872 * @param a0 The first extra argument.
2873 * @param a1 The second extra argument.
2874 */
2875#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
2876 do { \
2877 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
2878 } while (0)
2879
2880/**
2881 * Calls a FPU assembly implementation taking three visible arguments.
2882 *
2883 * @param a_pfnAImpl Pointer to the assembly FPU routine.
2884 * @param a0 The first extra argument.
2885 * @param a1 The second extra argument.
2886 * @param a2 The third extra argument.
2887 */
2888#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
2889 do { \
2890 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
2891 } while (0)
2892
2893#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
2894 do { \
2895 (a_FpuData).FSW = (a_FSW); \
2896 (a_FpuData).r80Result = *(a_pr80Value); \
2897 } while (0)
2898
2899/** Pushes FPU result onto the stack. */
2900#define IEM_MC_PUSH_FPU_RESULT(a_FpuData, a_uFpuOpcode) \
2901 iemFpuPushResult(pVCpu, &a_FpuData, a_uFpuOpcode)
2902/** Pushes FPU result onto the stack and sets the FPUDP. */
2903#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2904 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2905
2906/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
2907#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo, a_uFpuOpcode) \
2908 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo, a_uFpuOpcode)
2909
2910/** Stores FPU result in a stack register. */
2911#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg, a_uFpuOpcode) \
2912 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg, a_uFpuOpcode)
2913/** Stores FPU result in a stack register and pops the stack. */
2914#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg, a_uFpuOpcode) \
2915 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg, a_uFpuOpcode)
2916/** Stores FPU result in a stack register and sets the FPUDP. */
2917#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2918 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2919/** Stores FPU result in a stack register, sets the FPUDP, and pops the
2920 * stack. */
2921#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2922 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2923
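/*
 * Usage note (very rough sketch of a register-form FPU arithmetic instruction;
 * the stack-register test MC, the FpuRes/pFpuRes declarations and the worker
 * name are assumptions borrowed from elsewhere in this file):
 *
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *          IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_xxx_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0, uFpuOpcode);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0, uFpuOpcode);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP_AND_FINISH();
 */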
2924/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
2925#define IEM_MC_UPDATE_FPU_OPCODE_IP(a_uFpuOpcode) \
2926 iemFpuUpdateOpcodeAndIp(pVCpu, a_uFpuOpcode)
2927/** Free a stack register (for FFREE and FFREEP). */
2928#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
2929 iemFpuStackFree(pVCpu, a_iStReg)
2930/** Increment the FPU stack pointer. */
2931#define IEM_MC_FPU_STACK_INC_TOP() \
2932 iemFpuStackIncTop(pVCpu)
2933/** Decrement the FPU stack pointer. */
2934#define IEM_MC_FPU_STACK_DEC_TOP() \
2935 iemFpuStackDecTop(pVCpu)
2936
2937/** Updates the FSW, FOP, FPUIP, and FPUCS. */
2938#define IEM_MC_UPDATE_FSW(a_u16FSW, a_uFpuOpcode) \
2939 iemFpuUpdateFSW(pVCpu, a_u16FSW, a_uFpuOpcode)
2940/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
2941#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW, a_uFpuOpcode) \
2942 iemFpuUpdateFSW(pVCpu, a_u16FSW, a_uFpuOpcode)
2943/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
2944#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2945 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2946/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
2947#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW, a_uFpuOpcode) \
2948 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW, a_uFpuOpcode)
2949/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
2950 * stack. */
2951#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2952 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2953/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
2954#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW, a_uFpuOpcode) \
2955 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW, a_uFpuOpcode)
2956
2957/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
2958#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst, a_uFpuOpcode) \
2959 iemFpuStackUnderflow(pVCpu, a_iStDst, a_uFpuOpcode)
2960/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
2961 * stack. */
2962#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst, a_uFpuOpcode) \
2963 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst, a_uFpuOpcode)
2964/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
2965 * FPUDS. */
2966#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2967 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2968/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
2969 * FPUDS. Pops stack. */
2970#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2971 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2972/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
2973 * stack twice. */
2974#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP(a_uFpuOpcode) \
2975 iemFpuStackUnderflowThenPopPop(pVCpu, a_uFpuOpcode)
2976/** Raises a FPU stack underflow exception for an instruction pushing a result
2977 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
2978#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW(a_uFpuOpcode) \
2979 iemFpuStackPushUnderflow(pVCpu, a_uFpuOpcode)
2980/** Raises a FPU stack underflow exception for an instruction pushing a result
2981 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
2982#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO(a_uFpuOpcode) \
2983 iemFpuStackPushUnderflowTwo(pVCpu, a_uFpuOpcode)
2984
2985/** Raises a FPU stack overflow exception as part of a push attempt. Sets
2986 * FPUIP, FPUCS and FOP. */
2987#define IEM_MC_FPU_STACK_PUSH_OVERFLOW(a_uFpuOpcode) \
2988 iemFpuStackPushOverflow(pVCpu, a_uFpuOpcode)
2989/** Raises a FPU stack overflow exception as part of a push attempt. Sets
2990 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
2991#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2992 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2993/** Prepares for using the FPU state.
2994 * Ensures that we can use the host FPU in the current context (RC+R0).
2995 * Ensures the guest FPU state in the CPUMCTX is up to date. */
2996#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
2997/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
2998#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
2999/** Actualizes the guest FPU state so it can be accessed and modified. */
3000#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
3001
3002/** Prepares for using the SSE state.
3003 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
3004 * Ensures the guest SSE state in the CPUMCTX is up to date. */
3005#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
3006/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
3007#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
3008/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
3009#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
3010
3011/** Prepares for using the AVX state.
3012 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
3013 * Ensures the guest AVX state in the CPUMCTX is up to date.
3014 * @note This will include the AVX512 state too when support for it is added
3015 * due to the zero-extending feature of VEX instructions.
3016#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
3017/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
3018#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
3019/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
3020#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
3021
3022/**
3023 * Calls a MMX assembly implementation taking two visible arguments.
3024 *
3025 * @param a_pfnAImpl Pointer to the assembly MMX routine.
3026 * @param a0 The first extra argument.
3027 * @param a1 The second extra argument.
3028 */
3029#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
3030 do { \
3031 IEM_MC_PREPARE_FPU_USAGE(); \
3032 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
3033 } while (0)
3034
3035/**
3036 * Calls a MMX assembly implementation taking three visible arguments.
3037 *
3038 * @param a_pfnAImpl Pointer to the assembly MMX routine.
3039 * @param a0 The first extra argument.
3040 * @param a1 The second extra argument.
3041 * @param a2 The third extra argument.
3042 */
3043#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
3044 do { \
3045 IEM_MC_PREPARE_FPU_USAGE(); \
3046 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
3047 } while (0)
3048
3049
3050/**
3051 * Calls a SSE assembly implementation taking two visible arguments.
3052 *
3053 * @param a_pfnAImpl Pointer to the assembly SSE routine.
3054 * @param a0 The first extra argument.
3055 * @param a1 The second extra argument.
3056 */
3057#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
3058 do { \
3059 IEM_MC_PREPARE_SSE_USAGE(); \
3060 pVCpu->cpum.GstCtx.XState.x87.MXCSR = a_pfnAImpl(pVCpu->cpum.GstCtx.XState.x87.MXCSR & ~X86_MXCSR_XCPT_FLAGS, \
3061 (a0), (a1)); \
3062 } while (0)
3063
3064/**
3065 * Calls a SSE assembly implementation taking three visible arguments.
3066 *
3067 * @param a_pfnAImpl Pointer to the assembly SSE routine.
3068 * @param a0 The first extra argument.
3069 * @param a1 The second extra argument.
3070 * @param a2 The third extra argument.
3071 */
3072#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
3073 do { \
3074 IEM_MC_PREPARE_SSE_USAGE(); \
3075 pVCpu->cpum.GstCtx.XState.x87.MXCSR = a_pfnAImpl(pVCpu->cpum.GstCtx.XState.x87.MXCSR & ~X86_MXCSR_XCPT_FLAGS, \
3076 (a0), (a1), (a2)); \
3077 } while (0)
3078
3079
3080/**
3081 * Calls a AVX assembly implementation taking two visible arguments.
3082 *
3083 * There is one implicit zero'th argument, the guest MXCSR value (the updated value is returned).
3084 *
3085 * @param a_pfnAImpl Pointer to the assembly AVX routine.
3086 * @param a0 The first extra argument.
3087 * @param a1 The second extra argument.
3088 */
3089#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a0, a1) \
3090 do { \
3091 IEM_MC_PREPARE_AVX_USAGE(); \
3092 pVCpu->cpum.GstCtx.XState.x87.MXCSR = a_pfnAImpl(pVCpu->cpum.GstCtx.XState.x87.MXCSR & ~X86_MXCSR_XCPT_FLAGS, \
3093 (a0), (a1)); \
3094 } while (0)
3095
3096/**
3097 * Calls an AVX assembly implementation taking three visible arguments.
3098 *
3099 * There is one implicit zero'th argument, the guest MXCSR value with the exception flags masked out; the updated MXCSR is written back on return.
3100 *
3101 * @param a_pfnAImpl Pointer to the assembly AVX routine.
3102 * @param a0 The first extra argument.
3103 * @param a1 The second extra argument.
3104 * @param a2 The third extra argument.
3105 */
3106#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
3107 do { \
3108 IEM_MC_PREPARE_AVX_USAGE(); \
3109 pVCpu->cpum.GstCtx.XState.x87.MXCSR = a_pfnAImpl(pVCpu->cpum.GstCtx.XState.x87.MXCSR & ~X86_MXCSR_XCPT_FLAGS, \
3110 (a0), (a1), (a2)); \
3111 } while (0)
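
/* Minimal sketch of the worker shape the two AVX call macros above expect;
 * it follows the same MXCSR-in/MXCSR-out convention as the SSE variants.
 * Worker/variable names and operand types below are illustrative assumptions:
 *
 *      uint32_t myAvxWorker(uint32_t fMxcsr, PX86YMMREG puDst, PCX86YMMREG puSrc);
 *      ...
 *      IEM_MC_CALL_AVX_AIMPL_2(myAvxWorker, puDst, puSrc);
 */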
3112
3113/** @note Not for IOPL or IF testing. */
3114#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
3115/** @note Not for IOPL or IF testing. */
3116#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
3117/** @note Not for IOPL or IF testing. */
3118#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
3119/** @note Not for IOPL or IF testing. */
3120#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
3121/** @note Not for IOPL or IF testing. */
3122#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
3123 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
3124 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
3125/** @note Not for IOPL or IF testing. */
3126#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
3127 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
3128 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
3129/** @note Not for IOPL or IF testing. */
3130#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
3131 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
3132 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
3133 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
3134/** @note Not for IOPL or IF testing. */
3135#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
3136 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
3137 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
3138 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
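
/* Illustrative note: the paired-bit tests above compare the truth values of
 * two flags, which is how the signed Jcc/SETcc conditions are expressed, e.g.
 * (flag constants from x86.h, usage hypothetical):
 *
 *      IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF)                          // 'less':          SF != OF
 *      IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF)   // 'less or equal': ZF set, or SF != OF
 */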
3139#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
3140#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
3141#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
3142#define IEM_MC_IF_CX_IS_NOT_ONE() if (pVCpu->cpum.GstCtx.cx != 1) {
3143#define IEM_MC_IF_ECX_IS_NOT_ONE() if (pVCpu->cpum.GstCtx.ecx != 1) {
3144#define IEM_MC_IF_RCX_IS_NOT_ONE() if (pVCpu->cpum.GstCtx.rcx != 1) {
3145/** @note Not for IOPL or IF testing. */
3146#define IEM_MC_IF_CX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
3147 if ( pVCpu->cpum.GstCtx.cx != 1 \
3148 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
3149/** @note Not for IOPL or IF testing. */
3150#define IEM_MC_IF_ECX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
3151 if ( pVCpu->cpum.GstCtx.ecx != 1 \
3152 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
3153/** @note Not for IOPL or IF testing. */
3154#define IEM_MC_IF_RCX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
3155 if ( pVCpu->cpum.GstCtx.rcx != 1 \
3156 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
3157/** @note Not for IOPL or IF testing. */
3158#define IEM_MC_IF_CX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
3159 if ( pVCpu->cpum.GstCtx.cx != 1 \
3160 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
3161/** @note Not for IOPL or IF testing. */
3162#define IEM_MC_IF_ECX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
3163 if ( pVCpu->cpum.GstCtx.ecx != 1 \
3164 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
3165/** @note Not for IOPL or IF testing. */
3166#define IEM_MC_IF_RCX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
3167 if ( pVCpu->cpum.GstCtx.rcx != 1 \
3168 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
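
/* Illustrative sketch: the counter-and-flag tests above fit LOOPcc-style
 * logic, where testing for 'not one' before the counter is decremented is
 * equivalent to testing 'not zero' after it. A hypothetical 32-bit LOOPE
 * condition (decrement omitted, i8Imm a made-up local) could read:
 *
 *      IEM_MC_IF_ECX_IS_NOT_ONE_AND_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_REL_JMP_S8_AND_FINISH(i8Imm);
 *      IEM_MC_ELSE()
 *          IEM_MC_ADVANCE_RIP_AND_FINISH();
 *      IEM_MC_ENDIF();
 */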
3169#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
3170#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
3171
3172#define IEM_MC_REF_FPUREG(a_pr80Dst, a_iSt) \
3173 do { (a_pr80Dst) = &pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80; } while (0)
3174#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
3175 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
3176#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
3177 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
3178#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
3179 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
3180#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
3181 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
3182#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
3183 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
3184#define IEM_MC_IF_FCW_IM() \
3185 if (pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_IM) {
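
/* Illustrative sketch: a typical x87 consumer obtains a reference to a stack
 * register through the not-empty/ref tests and handles the empty case in the
 * else branch (pr80Value is a made-up local, a_iSt = 0 meaning ST0; the
 * underflow handling is only indicated):
 *
 *      IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
 *          ... use *pr80Value ...
 *      IEM_MC_ELSE()
 *          ... record an FPU stack underflow ...
 *      IEM_MC_ENDIF();
 */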
3186
3187#define IEM_MC_ELSE() } else {
3188#define IEM_MC_ENDIF() } do {} while (0)
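
/* Illustrative expansion sketch: a guarded MC block such as
 *
 *      IEM_MC_IF_LOCAL_IS_Z(uValue)
 *          ...
 *      IEM_MC_ELSE()
 *          ...
 *      IEM_MC_ENDIF();
 *
 * expands to 'if ((uValue) == 0) { ... } else { ... } do {} while (0);'. The
 * trailing do/while in IEM_MC_ENDIF() presumably exists so the terminating
 * semicolon belongs to a real statement rather than an empty one (uValue is a
 * made-up local).
 */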
3189
3190
3191/** Recompiler debugging: Flush guest register shadow copies. */
3192#define IEM_MC_HINT_FLUSH_GUEST_SHADOW(g_fGstShwFlush) ((void)0)
3193
3194/** @} */
3195
3196#endif /* !VMM_INCLUDED_SRC_include_IEMMc_h */
3197