VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMMc.h@104099

Last change on this file since 104099 was 104099, checked in by vboxsync, 11 months ago:
VMM/IEM: Emit native code for shl Ev,CL. bugref:10376
/* $Id: IEMMc.h 104099 2024-03-28 01:42:59Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - IEM_MC_XXX.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#ifndef VMM_INCLUDED_SRC_include_IEMMc_h
#define VMM_INCLUDED_SRC_include_IEMMc_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif


/** @name "Microcode" macros.
 *
 * The idea is that we should be able to use the same code to interpret
 * instructions as well as to recompile them.  Thus this obfuscation.
 *
 * @{
 */

#define IEM_MC_BEGIN(a_fMcFlags, a_fCImplFlags) {
#define IEM_MC_END()                            }

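/* Usage sketch (illustrative only; not copied from the instruction tables in
 * IEMAllInst*.cpp.h, and the decoder/helper names used here are just the
 * conventional ones): a 32-bit register ADD written as microcode.  The very
 * same statement block is compiled into the interpreter and parsed by
 * IEMAllInstPython.py for the recompilers.  IEM_MC_CALL_VOID_AIMPL_3 is
 * defined further down in this file:
 *
 * @code
 *  IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0);
 *      IEM_MC_ARG(uint32_t *,  pu32Dst, 0);
 *      IEM_MC_ARG(uint32_t,    u32Src,  1);
 *      IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
 *      IEM_MC_FETCH_GREG_U32(u32Src, IEM_GET_MODRM_RM(pVCpu, bRm));
 *      IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, bRm));
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *      IEM_MC_CLEAR_HIGH_GREG_U64(IEM_GET_MODRM_REG(pVCpu, bRm));
 *      IEM_MC_COMMIT_EFLAGS(EFlags);
 *      IEM_MC_ADVANCE_RIP_AND_FINISH();
 *  IEM_MC_END();
 * @endcode
 */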
/** Internal macro. */
#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = a_Expr; \
        if (rcStrict2 == VINF_SUCCESS) \
        { /* likely */ } \
        else \
            return rcStrict2; \
    } while (0)


/** Dummy MC that prevents native recompilation. */
#define IEM_MC_NO_NATIVE_RECOMPILE() ((void)0)

/** Advances RIP, finishes the instruction and returns.
 * This may include raising debug exceptions and such. */
#define IEM_MC_ADVANCE_RIP_AND_FINISH() return iemRegAddToRipAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
#define IEM_MC_REL_JMP_S8_AND_FINISH(a_i8) \
    return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i8), pVCpu->iem.s.enmEffOpSize)
/** Sets RIP (may trigger \#GP), finishes the instruction and returns.
 * @note only usable in 16-bit op size mode. */
#define IEM_MC_REL_JMP_S16_AND_FINISH(a_i16) \
    return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i16))
/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
#define IEM_MC_REL_JMP_S32_AND_FINISH(a_i32) \
    return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i32), pVCpu->iem.s.enmEffOpSize)
/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
#define IEM_MC_SET_RIP_U16_AND_FINISH(a_u16NewIP) \
    return iemRegRipJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP), IEM_GET_INSTR_LEN(pVCpu))
/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
#define IEM_MC_SET_RIP_U32_AND_FINISH(a_u32NewIP) \
    return iemRegRipJumpU32AndFinishClearingRF((pVCpu), (a_u32NewIP), IEM_GET_INSTR_LEN(pVCpu))
/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
#define IEM_MC_SET_RIP_U64_AND_FINISH(a_u64NewIP) \
    return iemRegRipJumpU64AndFinishClearingRF((pVCpu), (a_u64NewIP), IEM_GET_INSTR_LEN(pVCpu))

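/* Illustrative use of the finishers in a conditional-jump body (hedged
 * sketch, not copied source; the IEM_MC_IF_EFL_BIT_SET/IEM_MC_ELSE/
 * IEM_MC_ENDIF conditionals are defined further down in this file):
 *
 * @code
 *  IEM_MC_BEGIN(0, 0);
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
 *          IEM_MC_REL_JMP_S8_AND_FINISH(i8Imm);    // taken: RIP += imm8, may #GP
 *      } IEM_MC_ELSE() {
 *          IEM_MC_ADVANCE_RIP_AND_FINISH();        // not taken: next instruction
 *      } IEM_MC_ENDIF();
 *  IEM_MC_END();
 * @endcode
 */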
#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
    do { \
        if (RT_LIKELY(!(pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)))) \
        { /* probable */ } \
        else return iemRaiseDeviceNotAvailable(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
    do { \
        if (RT_LIKELY(!((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)))) \
        { /* probable */ } \
        else return iemRaiseDeviceNotAvailable(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
    do { \
        if (RT_LIKELY(!(pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES))) \
        { /* probable */ } \
        else return iemRaiseMathFault(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
    do { \
        /* Since none of the bits we compare from XCR0, CR4 and CR0 overlap, it can \
           be reduced to a single compare branch in the more probable code path. */ \
        if (RT_LIKELY(   (  (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) \
                          | (pVCpu->cpum.GstCtx.cr4     & X86_CR4_OSXSAVE) \
                          | (pVCpu->cpum.GstCtx.cr0     & X86_CR0_TS)) \
                      == (XSAVE_C_YMM | XSAVE_C_SSE | X86_CR4_OSXSAVE))) \
        { /* probable */ } \
        else if (   (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
                 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)) \
            return iemRaiseUndefinedOpcode(pVCpu); \
        else \
            return iemRaiseDeviceNotAvailable(pVCpu); \
    } while (0)
AssertCompile(!((XSAVE_C_YMM | XSAVE_C_SSE) & X86_CR4_OSXSAVE));
AssertCompile(!((XSAVE_C_YMM | XSAVE_C_SSE) & X86_CR0_TS));
AssertCompile(!(X86_CR4_OSXSAVE & X86_CR0_TS));
#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
    do { \
        /* Since the CR4 and CR0 bits don't overlap, it can be reduced to a \
           single compare branch in the more probable code path. */ \
        if (RT_LIKELY(   (  (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
                          | (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR)) \
                      == X86_CR4_OSFXSR)) \
        { /* likely */ } \
        else if (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
                 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR)) \
            return iemRaiseUndefinedOpcode(pVCpu); \
        else \
            return iemRaiseDeviceNotAvailable(pVCpu); \
    } while (0)
AssertCompile(!((X86_CR0_EM | X86_CR0_TS) & X86_CR4_OSFXSR));
#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
    do { \
        /* Since the two CR0 bits don't overlap with FSW.ES, this can be reduced to a \
           single compare branch in the more probable code path. */ \
        if (RT_LIKELY(!(  (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
                        | (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES)))) \
        { /* probable */ } \
        else if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
            return iemRaiseUndefinedOpcode(pVCpu); \
        else if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
            return iemRaiseDeviceNotAvailable(pVCpu); \
        else \
            return iemRaiseMathFault(pVCpu); \
    } while (0)
AssertCompile(!((X86_CR0_EM | X86_CR0_TS) & X86_FSW_ES));
/** @todo recomp: this one is slightly problematic as the recompiler doesn't
 *        count the CPL into the TB key.  However it is safe enough for now, as
 *        it calls iemRaiseGeneralProtectionFault0 directly so no calls will be
 *        emitted for it. */
#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
    do { \
        if (RT_LIKELY(IEM_GET_CPL(pVCpu) == 0)) { /* probable */ } \
        else return iemRaiseGeneralProtectionFault0(pVCpu); \
    } while (0)
#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
    do { \
        if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
        else return iemRaiseGeneralProtectionFault0(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
    do { \
        if (RT_LIKELY(   ((pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE) | IEM_GET_CPU_MODE(pVCpu)) \
                      == (X86_CR4_FSGSBASE | IEMMODE_64BIT))) \
        { /* probable */ } \
        else return iemRaiseUndefinedOpcode(pVCpu); \
    } while (0)
AssertCompile(X86_CR4_FSGSBASE > UINT8_MAX);
#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
    do { \
        if (RT_LIKELY(IEM_IS_CANONICAL(a_u64Addr))) { /* likely */ } \
        else return iemRaiseGeneralProtectionFault0(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_SSE_AVX_SIMD_FP_OR_UD_XCPT() \
    do { \
        if (RT_LIKELY((  ~((pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
                       & (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)) \
        { /* probable */ } \
        else \
        { \
            if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT) \
                return iemRaiseSimdFpException(pVCpu); \
            return iemRaiseUndefinedOpcode(pVCpu); \
        } \
    } while (0)
#define IEM_MC_RAISE_SSE_AVX_SIMD_FP_OR_UD_XCPT() \
    do { \
        if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT) \
            return iemRaiseSimdFpException(pVCpu); \
        return iemRaiseUndefinedOpcode(pVCpu); \
    } while (0)

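/* The MAYBE_RAISE macros are emitted at the top of an MC block, before any
 * guest state is modified, so the #UD/#NM/#MF priorities match hardware.
 * Hedged sketch of a typical SSE instruction prologue (not copied source;
 * IEM_MC_PREPARE_SSE_USAGE is defined further down in this file):
 *
 * @code
 *  IEM_MC_BEGIN(IEM_MC_F_NOT_286_OR_OLDER, 0);
 *      IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();  // #UD if CR0.EM=1 or CR4.OSFXSR=0, #NM if CR0.TS=1
 *      IEM_MC_PREPARE_SSE_USAGE();
 *      // ... fetch operands, call worker, store result ...
 *      IEM_MC_ADVANCE_RIP_AND_FINISH();
 *  IEM_MC_END();
 * @endcode
 */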

#define IEM_MC_LOCAL(a_Type, a_Name)                            a_Type a_Name
#define IEM_MC_LOCAL_ASSIGN(a_Type, a_Name, a_Value)            a_Type a_Name = (a_Value)
#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value)             a_Type const a_Name = (a_Value)
#define IEM_MC_NOREF(a_Name)                                    RT_NOREF_PV(a_Name) /* NOP/liveness hack */
#define IEM_MC_ARG(a_Type, a_Name, a_iArg)                      a_Type a_Name
#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg)       a_Type const a_Name = (a_Value)
#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg)   a_Type const a_Name = &(a_Local)
/** @note IEMAllInstPython.py duplicates the expansion. */
#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
    uint32_t a_Name = pVCpu->cpum.GstCtx.eflags.u; \
    uint32_t *a_pName = &a_Name
/** @note IEMAllInstPython.py duplicates the expansion. */
#define IEM_MC_LOCAL_EFLAGS(a_Name)                             uint32_t a_Name = pVCpu->cpum.GstCtx.eflags.u
#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
    do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
#define IEM_MC_COMMIT_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput) do { \
        AssertMsg((pVCpu->cpum.GstCtx.eflags.u & ~(a_fEflOutput)) == ((a_EFlags) & ~(a_fEflOutput)), \
                  ("eflags.u=%#x (%#x) vs %s=%#x (%#x) - diff %#x (a_fEflOutput=%#x)\n", \
                   pVCpu->cpum.GstCtx.eflags.u & ~(a_fEflOutput), pVCpu->cpum.GstCtx.eflags.u, #a_EFlags, \
                   (a_EFlags) & ~(a_fEflOutput), (a_EFlags), \
                   (pVCpu->cpum.GstCtx.eflags.u & ~(a_fEflOutput)) ^ ((a_EFlags) & ~(a_fEflOutput)), a_fEflOutput)); \
        pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); \
        Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); \
    } while (0)
#define IEM_MC_COMMIT_EFLAGS_OPT(a_EFlags)                                  IEM_MC_COMMIT_EFLAGS(a_EFlags)
#define IEM_MC_COMMIT_EFLAGS_OPT_EX(a_EFlags, a_fEflInput, a_fEflOutput)    IEM_MC_COMMIT_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput)

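/* The commit macros assert that the reserved bit 1 of EFLAGS (X86_EFL_1,
 * always reads as one) survived the worker.  Illustrative pairing with the
 * declaration macros above (hedged sketch; iemAImpl_neg_u32 stands in for
 * any unary worker taking the flags by pointer):
 *
 * @code
 *  IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 1);   // uint32_t EFlags = guest value; pEFlags = &EFlags
 *  IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_neg_u32, pu32Dst, pEFlags); // worker updates *pEFlags
 *  IEM_MC_COMMIT_EFLAGS(EFlags);                  // write back + Assert(EFlags & X86_EFL_1)
 * @endcode
 */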
/** ASSUMES the source variable is not used after this statement. */
#define IEM_MC_ASSIGN_TO_SMALLER(a_VarDst, a_VarSrcEol)     (a_VarDst) = (a_VarSrcEol)

#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg)          (a_u8Dst)  = iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg)  (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg)  (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg)  (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg)  (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg)  (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg)  (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg)        (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg)        (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg)        (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U64_ZX_U64                    IEM_MC_FETCH_GREG_U64
#define IEM_MC_FETCH_GREG_PAIR_U32(a_u64Dst, a_iGRegLo, a_iGRegHi) do { \
        (a_u64Dst).s.Lo = iemGRegFetchU32(pVCpu, (a_iGRegLo)); \
        (a_u64Dst).s.Hi = iemGRegFetchU32(pVCpu, (a_iGRegHi)); \
    } while (0)
#define IEM_MC_FETCH_GREG_PAIR_U64(a_u128Dst, a_iGRegLo, a_iGRegHi) do { \
        (a_u128Dst).s.Lo = iemGRegFetchU64(pVCpu, (a_iGRegLo)); \
        (a_u128Dst).s.Hi = iemGRegFetchU64(pVCpu, (a_iGRegHi)); \
    } while (0)
#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
        IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
        (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
    } while (0)
#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
        IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
        (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
    } while (0)
#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
        IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
        (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
    } while (0)
/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
        IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
        (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
    } while (0)
#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
        IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
        (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
    } while (0)
/** @note Not for IOPL or IF testing or modification. */
#define IEM_MC_FETCH_EFLAGS(a_EFlags)                   (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
#define IEM_MC_FETCH_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput) IEM_MC_FETCH_EFLAGS(a_EFlags)
#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags)                (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u /* (only LAHF) */
#define IEM_MC_FETCH_FSW(a_u16Fsw)                      (a_u16Fsw) = pVCpu->cpum.GstCtx.XState.x87.FSW
#define IEM_MC_FETCH_FCW(a_u16Fcw)                      (a_u16Fcw) = pVCpu->cpum.GstCtx.XState.x87.FCW

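/* The _ZX_/_SX_ fetch variants above rely on plain C integer conversions;
 * e.g. IEM_MC_FETCH_GREG_U8_SX_U64(u64Dst, iGReg) behaves like the sketch
 * below, which is what the movzx/movsx family needs:
 *
 * @code
 *  u64Dst = (uint64_t)(int64_t)(int8_t)iemGRegFetchU8(pVCpu, iGReg); // sign-extend 8 -> 64 bits
 * @endcode
 */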
#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value)        *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value)      *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value)      *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value)      *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
#define IEM_MC_STORE_GREG_I64(a_iGReg, a_i64Value)      *iemGRegRefI64(pVCpu, (a_iGReg)) = (a_i64Value)
#define IEM_MC_STORE_GREG_U8_CONST                      IEM_MC_STORE_GREG_U8
#define IEM_MC_STORE_GREG_U16_CONST                     IEM_MC_STORE_GREG_U16
#define IEM_MC_STORE_GREG_U32_CONST                     IEM_MC_STORE_GREG_U32
#define IEM_MC_STORE_GREG_U64_CONST                     IEM_MC_STORE_GREG_U64
#define IEM_MC_STORE_GREG_PAIR_U32(a_iGRegLo, a_iGRegHi, a_u64Value) do { \
        *iemGRegRefU64(pVCpu, (a_iGRegLo)) = (uint32_t)(a_u64Value).s.Lo; \
        *iemGRegRefU64(pVCpu, (a_iGRegHi)) = (uint32_t)(a_u64Value).s.Hi; \
    } while (0)
#define IEM_MC_STORE_GREG_PAIR_U64(a_iGRegLo, a_iGRegHi, a_u128Value) do { \
        *iemGRegRefU64(pVCpu, (a_iGRegLo)) = (uint64_t)(a_u128Value).s.Lo; \
        *iemGRegRefU64(pVCpu, (a_iGRegHi)) = (uint64_t)(a_u128Value).s.Hi; \
    } while (0)
#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg)             *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX

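/* Note that IEM_MC_STORE_GREG_U32 deliberately writes through the 64-bit
 * register reference: on AMD64 a 32-bit destination write zero-extends into
 * bits 63:32.  In plain C terms (illustrative):
 *
 * @code
 *  uint64_t *pu64Reg = iemGRegRefU64(pVCpu, iGReg);
 *  *pu64Reg = (uint32_t)u32Value;   // the high dword becomes zero
 * @endcode
 */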
/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
        IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
        *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
    } while (0)
#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
        IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
        *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
    } while (0)
#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
    do { pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)


#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg)           (a_pu8Dst)  = iemGRegRefU8( pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U8_CONST(a_pu8Dst, a_iGReg)     (a_pu8Dst)  = (uint8_t const *)iemGRegRefU8( pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg)         (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U16_CONST(a_pu16Dst, a_iGReg)   (a_pu16Dst) = (uint16_t const *)iemGRegRefU16(pVCpu, (a_iGReg))
/** @todo Users of IEM_MC_REF_GREG_U32 need to clear the high bits on commit.
 *        Use IEM_MC_CLEAR_HIGH_GREG_U64! */
#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg)         (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U32_CONST(a_pu32Dst, a_iGReg)   (a_pu32Dst) = (uint32_t const *)iemGRegRefU32(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_I32(a_pi32Dst, a_iGReg)         (a_pi32Dst) = (int32_t *)iemGRegRefU32(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_I32_CONST(a_pi32Dst, a_iGReg)   (a_pi32Dst) = (int32_t const *)iemGRegRefU32(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg)         (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U64_CONST(a_pu64Dst, a_iGReg)   (a_pu64Dst) = (uint64_t const *)iemGRegRefU64(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_I64(a_pi64Dst, a_iGReg)         (a_pi64Dst) = (int64_t *)iemGRegRefU64(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_I64_CONST(a_pi64Dst, a_iGReg)   (a_pi64Dst) = (int64_t const *)iemGRegRefU64(pVCpu, (a_iGReg))
/** @note Not for IOPL or IF testing or modification.
 *  @note Must preserve any undefined bits, see CPUMX86EFLAGS! */
#define IEM_MC_REF_EFLAGS(a_pEFlags)                    (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.uBoth
#define IEM_MC_REF_EFLAGS_EX(a_pEFlags, a_fEflInput, a_fEflOutput) IEM_MC_REF_EFLAGS(a_pEFlags)
#define IEM_MC_REF_MXCSR(a_pfMxcsr)                     (a_pfMxcsr) = &pVCpu->cpum.GstCtx.XState.x87.MXCSR

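/* Illustrative read-modify-write pattern for the @todo above (hedged sketch;
 * iemAImpl_myRmw_u32 is a placeholder worker name, not a real helper):
 *
 * @code
 *  IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_RM(pVCpu, bRm));
 *  IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_myRmw_u32, pu32Dst);       // modifies *pu32Dst in place
 *  IEM_MC_CLEAR_HIGH_GREG_U64(IEM_GET_MODRM_RM(pVCpu, bRm));    // emulate 32-bit write zero-extension
 * @endcode
 */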
#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value)        *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
    do { \
        uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
        *pu32Reg += (a_u32Value); \
        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
    } while (0)
#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value)        *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)

#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u8Const)         *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u8Const)
#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u8Const) \
    do { \
        uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
        *pu32Reg -= (a_u8Const); \
        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
    } while (0)
#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u8Const)         *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u8Const)
#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const)    do { (a_u16Value) -= a_u16Const; } while (0)

#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg)     do { (a_u8Value)  += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg)   do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg)   do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg)   do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16)  do { (a_EffAddr) += (a_i16); } while (0)
#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32)  do { (a_EffAddr) += (a_i32); } while (0)
#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64)  do { (a_EffAddr) += (a_i64); } while (0)

#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask)        do { (a_u8Local)  &= (a_u8Mask);  } while (0)
#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask)     do { (a_u16Local) &= (a_u16Mask); } while (0)
#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask)     do { (a_u32Local) &= (a_u32Mask); } while (0)
#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask)     do { (a_u64Local) &= (a_u64Mask); } while (0)

#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask)         do { (a_u16Arg) &= (a_u16Mask); } while (0)
#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask)         do { (a_u32Arg) &= (a_u32Mask); } while (0)
#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask)         do { (a_u64Arg) &= (a_u64Mask); } while (0)

#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask)         do { (a_u8Local)  |= (a_u8Mask);  } while (0)
#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask)      do { (a_u16Local) |= (a_u16Mask); } while (0)
#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask)      do { (a_u32Local) |= (a_u32Mask); } while (0)

#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift)      do { (a_i16Local) >>= (a_cShift); } while (0)
#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift)      do { (a_i32Local) >>= (a_cShift); } while (0)
#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift)      do { (a_i64Local) >>= (a_cShift); } while (0)

#define IEM_MC_SHR_LOCAL_U8(a_u8Local, a_cShift)        do { (a_u8Local)  >>= (a_cShift); } while (0)

#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift)      do { (a_i16Local) <<= (a_cShift); } while (0)
#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift)      do { (a_i32Local) <<= (a_cShift); } while (0)
#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift)      do { (a_i64Local) <<= (a_cShift); } while (0)

#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask)     do { (a_u32Local) &= (a_u32Mask); } while (0)

#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask)      do { (a_u32Local) |= (a_u32Mask); } while (0)

#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value)          *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value)        *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
    do { \
        uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
        *pu32Reg &= (a_u32Value); \
        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
    } while (0)
#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value)        *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)

#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value)           *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value)         *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
    do { \
        uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
        *pu32Reg |= (a_u32Value); \
        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
    } while (0)
#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value)         *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)

#define IEM_MC_BSWAP_LOCAL_U16(a_u16Local)      (a_u16Local) = RT_BSWAP_U16((a_u16Local));
#define IEM_MC_BSWAP_LOCAL_U32(a_u32Local)      (a_u32Local) = RT_BSWAP_U32((a_u32Local));
#define IEM_MC_BSWAP_LOCAL_U64(a_u64Local)      (a_u64Local) = RT_BSWAP_U64((a_u64Local));

/** @note Not for IOPL or IF modification. */
#define IEM_MC_SET_EFL_BIT(a_fBit)              do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
/** @note Not for IOPL or IF modification. */
#define IEM_MC_CLEAR_EFL_BIT(a_fBit)            do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
/** @note Not for IOPL or IF modification. */
#define IEM_MC_FLIP_EFL_BIT(a_fBit)             do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)

#define IEM_MC_CLEAR_FSW_EX()   do { pVCpu->cpum.GstCtx.XState.x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)

/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0xff) if necessary. */
#define IEM_MC_FPU_TO_MMX_MODE() do { \
        iemFpuRotateStackSetTop(&pVCpu->cpum.GstCtx.XState.x87, 0); \
        pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
        pVCpu->cpum.GstCtx.XState.x87.FTW = 0xff; \
    } while (0)

/** Switches the FPU state from MMX mode (FSW.TOS=0, FTW=0). */
#define IEM_MC_FPU_FROM_MMX_MODE() do { \
        iemFpuRotateStackSetTop(&pVCpu->cpum.GstCtx.XState.x87, 0); \
        pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
        pVCpu->cpum.GstCtx.XState.x87.FTW = 0; \
    } while (0)

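/* Illustrative MMX store path (hedged sketch, not copied source): MMX
 * instructions switch the x87 unit into MMX mode before touching the MM
 * registers, and the MREG stores below tag the register valid by setting the
 * exponent word:
 *
 * @code
 *  IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();      // #UD/#NM/#MF checks first
 *  IEM_MC_FPU_TO_MMX_MODE();                   // TOS=0, FTW=0xff (all tags valid)
 *  IEM_MC_STORE_MREG_U64(iMReg, u64Value);     // also sets au32[2] to 0xffff
 * @endcode
 */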
#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
    do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx; } while (0)
#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg, a_iDWord) \
    do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[a_iDWord]; } while (0)
#define IEM_MC_FETCH_MREG_U16(a_u16Value, a_iMReg, a_iWord) \
    do { (a_u16Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au16[a_iWord]; } while (0)
#define IEM_MC_FETCH_MREG_U8(a_u8Value, a_iMReg, a_iByte) \
    do { (a_u8Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au8[a_iByte]; } while (0)
#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx     = (a_u64Value); \
         pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
    } while (0)
#define IEM_MC_STORE_MREG_U32(a_iMReg, a_iDword, a_u32Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[(a_iDword)] = (a_u32Value); \
         pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2]          = 0xffff; \
    } while (0)
#define IEM_MC_STORE_MREG_U16(a_iMReg, a_iWord, a_u16Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au16[(a_iWord)] = (a_u16Value); \
         pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2]         = 0xffff; \
    } while (0)
#define IEM_MC_STORE_MREG_U8(a_iMReg, a_iByte, a_u8Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au8[(a_iByte)] = (a_u8Value); \
         pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2]        = 0xffff; \
    } while (0)
#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx     = (uint32_t)(a_u32Value); \
         pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
    } while (0)
#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
        (a_pu64Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
        (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
        (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
#define IEM_MC_MODIFIED_MREG(a_iMReg) \
    do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; } while (0)
#define IEM_MC_MODIFIED_MREG_BY_REF(a_pu64Dst) \
    do { ((uint32_t *)(a_pu64Dst))[2] = 0xffff; } while (0)

#define IEM_MC_CLEAR_XREG_U32_MASK(a_iXReg, a_bMask) \
    do { if ((a_bMask) & (1 << 0)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0] = 0; \
         if ((a_bMask) & (1 << 1)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[1] = 0; \
         if ((a_bMask) & (1 << 2)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[2] = 0; \
         if ((a_bMask) & (1 << 3)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[3] = 0; \
    } while (0)
#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
    do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
         (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
    } while (0)
#define IEM_MC_FETCH_XREG_XMM(a_XmmValue, a_iXReg) \
    do { (a_XmmValue).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
         (a_XmmValue).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
    } while (0)
#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg, a_iQWord) \
    do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQWord)]; } while (0)
#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg, a_iDWord) \
    do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDWord)]; } while (0)
#define IEM_MC_FETCH_XREG_U16(a_u16Value, a_iXReg, a_iWord) \
    do { (a_u16Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au16[(a_iWord)]; } while (0)
#define IEM_MC_FETCH_XREG_U8( a_u8Value, a_iXReg, a_iByte) \
    do { (a_u8Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au8[(a_iByte)]; } while (0)
#define IEM_MC_FETCH_XREG_PAIR_U128(a_Dst, a_iXReg1, a_iXReg2) \
    do { (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
         (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
         (a_Dst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[0]; \
         (a_Dst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[1]; \
    } while (0)
#define IEM_MC_FETCH_XREG_PAIR_XMM(a_Dst, a_iXReg1, a_iXReg2) \
    do { (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
         (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
         (a_Dst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[0]; \
         (a_Dst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[1]; \
    } while (0)
#define IEM_MC_FETCH_XREG_PAIR_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_iXReg2) \
    do { (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
         (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
         (a_Dst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[0]; \
         (a_Dst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[1]; \
         (a_Dst).u64Rax        = pVCpu->cpum.GstCtx.rax; \
         (a_Dst).u64Rdx        = pVCpu->cpum.GstCtx.rdx; \
    } while (0)
#define IEM_MC_FETCH_XREG_PAIR_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_iXReg2) \
    do { (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
         (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
         (a_Dst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[0]; \
         (a_Dst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[1]; \
         (a_Dst).u64Rax        = (int64_t)(int32_t)pVCpu->cpum.GstCtx.eax; \
         (a_Dst).u64Rdx        = (int64_t)(int32_t)pVCpu->cpum.GstCtx.edx; \
    } while (0)
#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
    } while (0)
#define IEM_MC_STORE_XREG_XMM(a_iXReg, a_XmmValue) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_XmmValue).au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_XmmValue).au64[1]; \
    } while (0)
#define IEM_MC_STORE_XREG_XMM_U32(a_iXReg, a_iDword, a_XmmValue) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDword)] = (a_XmmValue).au32[(a_iDword)]; } while (0)
#define IEM_MC_STORE_XREG_XMM_U64(a_iXReg, a_iQword, a_XmmValue) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQword)] = (a_XmmValue).au64[(a_iQword)]; } while (0)
#define IEM_MC_STORE_XREG_U64(a_iXReg, a_iQword, a_u64Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQword)] = (a_u64Value); } while (0)
#define IEM_MC_STORE_XREG_U32(a_iXReg, a_iDword, a_u32Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDword)] = (a_u32Value); } while (0)
#define IEM_MC_STORE_XREG_U16(a_iXReg, a_iWord, a_u16Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au16[(a_iWord)] = (a_u16Value); } while (0)
#define IEM_MC_STORE_XREG_U8(a_iXReg, a_iByte, a_u8Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au8[(a_iByte)] = (a_u8Value); } while (0)

#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
    } while (0)

#define IEM_MC_STORE_XREG_U32_U128(a_iXReg, a_iDwDst, a_u128Value, a_iDwSrc) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDwDst)] = (a_u128Value).au32[(a_iDwSrc)]; } while (0)
#define IEM_MC_STORE_XREG_R32(a_iXReg, a_r32Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[0] = (a_r32Value); } while (0)
#define IEM_MC_STORE_XREG_R64(a_iXReg, a_r64Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[0] = (a_r64Value); } while (0)
#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
    } while (0)

#define IEM_MC_BROADCAST_XREG_U8_ZX_VLMAX(a_iXRegDst, a_u8Src) \
    do { uintptr_t const iXRegDstTmp = (a_iXRegDst); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[0]  = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[1]  = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[2]  = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[3]  = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[4]  = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[5]  = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[6]  = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[7]  = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[8]  = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[9]  = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[10] = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[11] = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[12] = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[13] = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[14] = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[15] = (a_u8Src); \
         IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
    } while (0)
#define IEM_MC_BROADCAST_XREG_U16_ZX_VLMAX(a_iXRegDst, a_u16Src) \
    do { uintptr_t const iXRegDstTmp = (a_iXRegDst); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[0] = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[1] = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[2] = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[3] = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[4] = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[5] = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[6] = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[7] = (a_u16Src); \
         IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
    } while (0)
#define IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX(a_iXRegDst, a_u32Src) \
    do { uintptr_t const iXRegDstTmp = (a_iXRegDst); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[0] = (a_u32Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[1] = (a_u32Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[2] = (a_u32Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[3] = (a_u32Src); \
         IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
    } while (0)
#define IEM_MC_BROADCAST_XREG_U64_ZX_VLMAX(a_iXRegDst, a_u64Src) \
    do { uintptr_t const iXRegDstTmp = (a_iXRegDst); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au64[0] = (a_u64Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au64[1] = (a_u64Src); \
         IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
    } while (0)

#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
    (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
    (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
#define IEM_MC_REF_XREG_XMM_CONST(a_pXmmDst, a_iXReg) \
    (a_pXmmDst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)])
#define IEM_MC_REF_XREG_U32_CONST(a_pu32Dst, a_iXReg) \
    (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0])
#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
    (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0])
#define IEM_MC_REF_XREG_R32_CONST(a_pr32Dst, a_iXReg) \
    (a_pr32Dst) = ((RTFLOAT32U const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[0])
#define IEM_MC_REF_XREG_R64_CONST(a_pr64Dst, a_iXReg) \
    (a_pr64Dst) = ((RTFLOAT64U const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[0])
#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[0] \
            = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[1] \
            = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[1]; \
    } while (0)

#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
    do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
         (a_u32Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au32[0]; \
    } while (0)
#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc, a_iQWord) \
    do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
         (a_u64Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[(a_iQWord)]; \
    } while (0)
#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc, a_iDQword) \
    do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
         if ((a_iDQword) == 0) \
         { \
             (a_u128Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegSrcTmp)].au64[0]; \
             (a_u128Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegSrcTmp)].au64[1]; \
         } \
         else \
         { \
             (a_u128Dst).au64[0] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegSrcTmp)].au64[0]; \
             (a_u128Dst).au64[1] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegSrcTmp)].au64[1]; \
         } \
    } while (0)
#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
    do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
         (a_u256Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
         (a_u256Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
         (a_u256Dst).au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
         (a_u256Dst).au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
    } while (0)

#define IEM_MC_STORE_YREG_U128(a_iYRegDst, a_iDQword, a_u128Value) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         if ((a_iDQword) == 0) \
         { \
             pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au64[0] = (a_u128Value).au64[0]; \
             pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au64[1] = (a_u128Value).au64[1]; \
         } \
         else \
         { \
             pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au64[0] = (a_u128Value).au64[0]; \
             pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au64[1] = (a_u128Value).au64[1]; \
         } \
    } while (0)

#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0]       = (a_u32Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1]       = 0; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = (a_u64Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = (a_u128Src).au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = (a_u128Src).au64[1]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = (a_u256Src).au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = (a_u256Src).au64[1]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)

#define IEM_MC_BROADCAST_YREG_U8_ZX_VLMAX(a_iYRegDst, a_u8Src) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[0]        = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[1]        = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[2]        = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[3]        = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[4]        = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[5]        = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[6]        = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[7]        = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[8]        = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[9]        = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[10]       = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[11]       = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[12]       = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[13]       = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[14]       = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[15]       = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[0]  = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[1]  = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[2]  = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[3]  = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[4]  = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[5]  = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[6]  = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[7]  = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[8]  = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[9]  = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[10] = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[11] = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[12] = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[13] = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[14] = (a_u8Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[15] = (a_u8Src); \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_BROADCAST_YREG_U16_ZX_VLMAX(a_iYRegDst, a_u16Src) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[0]       = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[1]       = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[2]       = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[3]       = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[4]       = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[5]       = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[6]       = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[7]       = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[0] = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[1] = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[2] = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[3] = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[4] = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[5] = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[6] = (a_u16Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[7] = (a_u16Src); \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_BROADCAST_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0]       = (a_u32Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1]       = (a_u32Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[2]       = (a_u32Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[3]       = (a_u32Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[0] = (a_u32Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[1] = (a_u32Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[2] = (a_u32Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[3] = (a_u32Src); \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_BROADCAST_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = (a_u64Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = (a_u64Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u64Src); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u64Src); \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_BROADCAST_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = (a_u128Src).au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = (a_u128Src).au64[1]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)

#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
    (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
    (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
    (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].au64[0])
#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
    do { uintptr_t const iYRegTmp = (a_iYReg); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegTmp); \
    } while (0)

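/* The _ZX_VLMAX stores/copies and IEM_MC_CLEAR_YREG_128_UP implement the VEX
 * rule that a VEX.128 encoding zeroes bits 255:128 of the destination, with
 * IEM_MC_INT_CLEAR_ZMM_256_UP as the hook for zeroing above bit 255 once
 * AVX-512 state is tracked.  Illustrative VEX.128 register move (hedged
 * sketch, not copied source):
 *
 * @code
 *  IEM_MC_COPY_YREG_U128_ZX_VLMAX(IEM_GET_MODRM_REG(pVCpu, bRm),
 *                                 IEM_GET_MODRM_RM(pVCpu, bRm));  // ymm[255:128] = 0
 * @endcode
 */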
#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)

#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
    do { uintptr_t const iYRegDstTmp   = (a_iYRegDst); \
         uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
         uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc32Tmp].au32[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au32[1]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
    do { uintptr_t const iYRegDstTmp   = (a_iYRegDst); \
         uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
         uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovlhps */ \
    do { uintptr_t const iYRegDstTmp   = (a_iYRegDst); \
         uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
         uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[0]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
    do { uintptr_t const iYRegDstTmp   = (a_iYRegDst); \
         uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
         uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[1]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX(a_iYRegDst, a_iYRegSrcHx, a_u64Local) \
    do { uintptr_t const iYRegDstTmp   = (a_iYRegDst); \
         uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = (a_u64Local); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
    do { uintptr_t const iYRegDstTmp   = (a_iYRegDst); \
         uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = (a_u64Local); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)

#ifndef IEM_WITH_SETJMP
# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
#else
# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
    ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
    ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
    ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))

# define IEM_MC_FETCH_MEM_FLAT_U8(a_u8Dst, a_GCPtrMem) \
    ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
# define IEM_MC_FETCH_MEM16_FLAT_U8(a_u8Dst, a_GCPtrMem16) \
    ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem16)))
# define IEM_MC_FETCH_MEM32_FLAT_U8(a_u8Dst, a_GCPtrMem32) \
    ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem32)))
#endif

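/* Each memory-access MC has two expansions.  Without IEM_WITH_SETJMP the
 * worker returns a VBOXSTRICTRC and the macro bails out of the MC block via
 * IEM_MC_RETURN_ON_FAILURE; with it, the *Jmp workers longjmp out on failure,
 * so the macro collapses to a plain assignment.  Conceptually:
 *
 * @code
 *  // status-code flavour
 *  VBOXSTRICTRC rcStrict2 = iemMemFetchDataU8(pVCpu, &u8Dst, iSeg, GCPtrMem);
 *  if (rcStrict2 != VINF_SUCCESS)
 *      return rcStrict2;
 *  // longjmp flavour: faults unwind via longjmp, no status to check
 *  u8Dst = iemMemFetchDataU8Jmp(pVCpu, iSeg, GCPtrMem);
 * @endcode
 */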
915#ifndef IEM_WITH_SETJMP
916# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
917 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
918# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
919 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
920# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
921 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
922#else
923# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
924 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
925# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
926 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
927# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
928 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
929
930# define IEM_MC_FETCH_MEM_FLAT_U16(a_u16Dst, a_GCPtrMem) \
931 ((a_u16Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
932# define IEM_MC_FETCH_MEM_FLAT_U16_DISP(a_u16Dst, a_GCPtrMem, a_offDisp) \
933 ((a_u16Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
934# define IEM_MC_FETCH_MEM_FLAT_I16(a_i16Dst, a_GCPtrMem) \
935 ((a_i16Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
936#endif
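/* Editor's note, an illustrative sketch (same assumptions as the one above):
 * the I16 variant exists so signed destinations get the (int16_t) cast in one
 * place instead of at every use site. */
#if 0
IEM_MC_LOCAL(int16_t, i16Value);
IEM_MC_FETCH_MEM_I16(i16Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
#endif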
937
938#ifndef IEM_WITH_SETJMP
939# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
940 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
941# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
942 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
943# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
944 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
945#else
946# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
947 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
948# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
949 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
950# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
951 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
952
953# define IEM_MC_FETCH_MEM_FLAT_U32(a_u32Dst, a_GCPtrMem) \
954 ((a_u32Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
955# define IEM_MC_FETCH_MEM_FLAT_U32_DISP(a_u32Dst, a_GCPtrMem, a_offDisp) \
956 ((a_u32Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
957# define IEM_MC_FETCH_MEM_FLAT_I32(a_i32Dst, a_GCPtrMem) \
958 ((a_i32Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
959#endif
960
961#ifndef IEM_WITH_SETJMP
962# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
963 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
964# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
965 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
966# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
967 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
968# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
969 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
970#else
971# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
972 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
973# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
974 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
975# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
976 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
977# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
978 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
979
980# define IEM_MC_FETCH_MEM_FLAT_U64(a_u64Dst, a_GCPtrMem) \
981 ((a_u64Dst) = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
982# define IEM_MC_FETCH_MEM_FLAT_U64_DISP(a_u64Dst, a_GCPtrMem, a_offDisp) \
983 ((a_u64Dst) = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
984# define IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128(a_u64Dst, a_GCPtrMem) \
985 ((a_u64Dst) = iemMemFlatFetchDataU64AlignedU128Jmp(pVCpu, (a_GCPtrMem)))
986# define IEM_MC_FETCH_MEM_FLAT_I64(a_i64Dst, a_GCPtrMem) \
987 ((a_i64Dst) = (int64_t)iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
988#endif
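/* Editor's note, an illustrative sketch (same assumptions as above): the _DISP
 * variants fold a constant displacement into the effective address, e.g. for
 * fetching the second qword of a 16-byte operand. */
#if 0
IEM_MC_LOCAL(uint64_t, u64Hi);
IEM_MC_FETCH_MEM_U64_DISP(u64Hi, pVCpu->iem.s.iEffSeg, GCPtrEffSrc, 8 /*a_offDisp*/);
#endif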
989
990#ifndef IEM_WITH_SETJMP
991# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
992 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u, (a_iSeg), (a_GCPtrMem)))
993# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
994 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).u, (a_iSeg), (a_GCPtrMem)))
995# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
996 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
997# define IEM_MC_FETCH_MEM_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \
998 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataD80(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem)))
999#else
1000# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
1001 ((a_r32Dst).u = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1002# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
1003 ((a_r64Dst).u = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1004# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
1005 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
1006# define IEM_MC_FETCH_MEM_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \
1007 iemMemFetchDataD80Jmp(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem))
1008
1009# define IEM_MC_FETCH_MEM_FLAT_R32(a_r32Dst, a_GCPtrMem) \
1010 ((a_r32Dst).u = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
1011# define IEM_MC_FETCH_MEM_FLAT_R64(a_r64Dst, a_GCPtrMem) \
1012 ((a_r64Dst).u = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
1013# define IEM_MC_FETCH_MEM_FLAT_R80(a_r80Dst, a_GCPtrMem) \
1014 iemMemFlatFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_GCPtrMem))
1015# define IEM_MC_FETCH_MEM_FLAT_D80(a_d80Dst, a_GCPtrMem) \
1016 iemMemFlatFetchDataD80Jmp(pVCpu, &(a_d80Dst), (a_GCPtrMem))
1017#endif
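/* Editor's note, an illustrative sketch (same assumptions as above): the
 * floating point fetches fill a caller-provided local (RTFLOAT80U here) by
 * address rather than returning a value, which is why the setjmp forms above
 * are statements instead of assignment expressions. */
#if 0
IEM_MC_LOCAL(RTFLOAT80U, r80Value);
IEM_MC_FETCH_MEM_R80(r80Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
#endif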
1018
1019#ifndef IEM_WITH_SETJMP
1020# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
1021 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
1022# define IEM_MC_FETCH_MEM_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \
1023 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128NoAc(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
1024# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
1025 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
1026
1027# define IEM_MC_FETCH_MEM_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \
1028 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)))
1029# define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \
1030 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)))
1031# define IEM_MC_FETCH_MEM_XMM_U32(a_XmmDst, a_iDWord, a_iSeg, a_GCPtrMem) \
1032 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_XmmDst).au32[(a_iDWord)], (a_iSeg), (a_GCPtrMem)))
1033# define IEM_MC_FETCH_MEM_XMM_U64(a_XmmDst, a_iQWord, a_iSeg, a_GCPtrMem) \
1034 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_XmmDst).au64[(a_iQWord)], (a_iSeg), (a_GCPtrMem)))
1035
1036# define IEM_MC_FETCH_MEM_U128_NO_AC_AND_XREG_U128(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1037 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128NoAc(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2))); \
1038 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1039 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1040 } while (0)
1041
1042# define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1043 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_Dst).uSrc2.uXmm, (a_iSeg2), (a_GCPtrMem2))); \
1044 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1045 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1046 } while (0)
1047
1048# define IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_iSeg2, a_GCPtrMem2) do { \
1049 (a_Dst).uSrc2.uXmm.au64[0] = 0; \
1050 (a_Dst).uSrc2.uXmm.au64[1] = 0; \
1051 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_Dst).uSrc2.uXmm.au32[(a_iDWord2)], (a_iSeg2), (a_GCPtrMem2))); \
1052 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1053 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1054 } while (0)
1055
1056# define IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_iSeg2, a_GCPtrMem2) do { \
1057 (a_Dst).uSrc2.uXmm.au64[!(a_iQWord2)] = 0; \
1058 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_Dst).uSrc2.uXmm.au64[(a_iQWord2)], (a_iSeg2), (a_GCPtrMem2))); \
1059 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1060 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1061 } while (0)
1062
1063# define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1064 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2))); \
1065 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1066 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1067 (a_Dst).u64Rax = pVCpu->cpum.GstCtx.rax; \
1068 (a_Dst).u64Rdx = pVCpu->cpum.GstCtx.rdx; \
1069 } while (0)
1070# define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1071 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2))); \
1072 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1073 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1074 (a_Dst).u64Rax = (int64_t)(int32_t)pVCpu->cpum.GstCtx.eax; \
1075 (a_Dst).u64Rdx = (int64_t)(int32_t)pVCpu->cpum.GstCtx.edx; \
1076 } while (0)
1077
1078#else
1079# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
1080 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
1081# define IEM_MC_FETCH_MEM_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \
1082 iemMemFetchDataU128NoAcJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
1083# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
1084 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
1085
1086# define IEM_MC_FETCH_MEM_XMM(a_XmmDst, a_iSeg, a_GCPtrMem) \
1087 iemMemFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
1088# define IEM_MC_FETCH_MEM_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \
1089 iemMemFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
1090# define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \
1091 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
1092# define IEM_MC_FETCH_MEM_XMM_U32(a_XmmDst, a_iDWord, a_iSeg, a_GCPtrMem) \
1093 (a_XmmDst).au32[(a_iDWord)] = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))
1094# define IEM_MC_FETCH_MEM_XMM_U64(a_XmmDst, a_iQWord, a_iSeg, a_GCPtrMem) \
1095 (a_XmmDst).au64[(a_iQWord)] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))
1096
1097# define IEM_MC_FETCH_MEM_FLAT_U128(a_u128Dst, a_GCPtrMem) \
1098 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
1099# define IEM_MC_FETCH_MEM_FLAT_U128_NO_AC(a_u128Dst, a_GCPtrMem) \
1100 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
1101# define IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE(a_u128Dst, a_GCPtrMem) \
1102 iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
1103
1104# define IEM_MC_FETCH_MEM_FLAT_XMM(a_XmmDst, a_GCPtrMem) \
1105 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem))
1106# define IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC(a_XmmDst, a_GCPtrMem) \
1107 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem))
1108# define IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE(a_XmmDst, a_GCPtrMem) \
1109 iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem))
1110# define IEM_MC_FETCH_MEM_FLAT_XMM_U32(a_XmmDst, a_iDWord, a_GCPtrMem) \
1111 (a_XmmDst).au32[(a_iDWord)] = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem))
1112# define IEM_MC_FETCH_MEM_FLAT_XMM_U64(a_XmmDst, a_iQWord, a_GCPtrMem) \
1113 (a_XmmDst).au64[(a_iQWord)] = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem))
1114
1115# define IEM_MC_FETCH_MEM_U128_AND_XREG_U128(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1116 iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \
1117 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1118 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1119 } while (0)
1120# define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
1121 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \
1122 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1123 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1124 } while (0)
1125
1126# define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1127 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_iSeg2), (a_GCPtrMem2)); \
1128 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1129 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1130 } while (0)
1131# define IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
1132 iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_GCPtrMem2)); \
1133 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1134 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1135 } while (0)
1136
1137# define IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_iSeg2, a_GCPtrMem2) do { \
1138 (a_Dst).uSrc2.uXmm.au64[0] = 0; \
1139 (a_Dst).uSrc2.uXmm.au64[1] = 0; \
1140 (a_Dst).uSrc2.uXmm.au32[(a_iDWord2)] = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg2), (a_GCPtrMem2)); \
1141 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1142 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1143 } while (0)
1144# define IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_GCPtrMem2) do { \
1145 (a_Dst).uSrc2.uXmm.au64[0] = 0; \
1146 (a_Dst).uSrc2.uXmm.au64[1] = 0; \
1147 (a_Dst).uSrc2.uXmm.au32[(a_iDWord2)] = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem2)); \
1148 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1149 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1150 } while (0)
1151
1152# define IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_iSeg2, a_GCPtrMem2) do { \
1153 (a_Dst).uSrc2.uXmm.au64[!(a_iQWord2)] = 0; \
1154 (a_Dst).uSrc2.uXmm.au64[(a_iQWord2)] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg2), (a_GCPtrMem2)); \
1155 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1156 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1157 } while (0)
1158# define IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_GCPtrMem2) do { \
1159 (a_Dst).uSrc2.uXmm.au64[!(a_iQWord2)] = 0; \
1160 (a_Dst).uSrc2.uXmm.au64[(a_iQWord2)] = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem2)); \
1161 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1162 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1163 } while (0)
1164
1165
1166# define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1167 iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \
1168 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1169 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1170 (a_Dst).u64Rax = pVCpu->cpum.GstCtx.rax; \
1171 (a_Dst).u64Rdx = pVCpu->cpum.GstCtx.rdx; \
1172 } while (0)
1173# define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1174 iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \
1175 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1176 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1177 (a_Dst).u64Rax = (int64_t)(int32_t)pVCpu->cpum.GstCtx.eax; \
1178 (a_Dst).u64Rdx = (int64_t)(int32_t)pVCpu->cpum.GstCtx.edx; \
1179 } while (0)
1180
1181# define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
1182 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \
1183 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1184 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1185 (a_Dst).u64Rax = pVCpu->cpum.GstCtx.rax; \
1186 (a_Dst).u64Rdx = pVCpu->cpum.GstCtx.rdx; \
1187 } while (0)
1188# define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
1189 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \
1190 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1191 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1192 (a_Dst).u64Rax = (int64_t)(int32_t)pVCpu->cpum.GstCtx.eax; \
1193 (a_Dst).u64Rdx = (int64_t)(int32_t)pVCpu->cpum.GstCtx.edx; \
1194 } while (0)
1195
1196#endif
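/* Editor's note, an illustrative sketch (same assumptions as above): a typical
 * SSE operand load via the alignment-checking variant, which faults on a
 * misaligned 16-byte access when the CPU would. */
#if 0
IEM_MC_LOCAL(RTUINT128U, uSrc);
IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
#endif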
1197
1198#ifndef IEM_WITH_SETJMP
1199# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
1200 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
1201# define IEM_MC_FETCH_MEM_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \
1202 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
1203# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
1204 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedAvx(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
1205
1206# define IEM_MC_FETCH_MEM_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \
1207 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
1208# define IEM_MC_FETCH_MEM_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \
1209 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
1210# define IEM_MC_FETCH_MEM_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \
1211 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedAvx(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
1212#else
1213# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
1214 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
1215# define IEM_MC_FETCH_MEM_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \
1216 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
1217# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
1218 iemMemFetchDataU256AlignedAvxJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
1219
1220# define IEM_MC_FETCH_MEM_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \
1221 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
1222# define IEM_MC_FETCH_MEM_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \
1223 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
1224# define IEM_MC_FETCH_MEM_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \
1225 iemMemFetchDataU256AlignedAvxJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
1226
1227# define IEM_MC_FETCH_MEM_FLAT_U256(a_u256Dst, a_GCPtrMem) \
1228 iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem))
1229# define IEM_MC_FETCH_MEM_FLAT_U256_NO_AC(a_u256Dst, a_GCPtrMem) \
1230 iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem))
1231# define IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX(a_u256Dst, a_GCPtrMem) \
1232 iemMemFlatFetchDataU256AlignedAvxJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem))
1233
1234# define IEM_MC_FETCH_MEM_FLAT_YMM(a_YmmDst, a_GCPtrMem) \
1235 iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_GCPtrMem))
1236# define IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC(a_YmmDst, a_GCPtrMem) \
1237 iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_GCPtrMem))
1238# define IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX(a_YmmDst, a_GCPtrMem) \
1239 iemMemFlatFetchDataU256AlignedAvxJmp(pVCpu, &(a_YmmDst).ymm, (a_GCPtrMem))
1240#endif
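/* Editor's note, an illustrative sketch (same assumptions as above): the same
 * pattern one size up for AVX. */
#if 0
IEM_MC_LOCAL(RTUINT256U, uSrc);
IEM_MC_FETCH_MEM_U256_ALIGN_AVX(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
#endif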
1241
1242
1243
1244#ifndef IEM_WITH_SETJMP
1245# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
1246 do { \
1247 uint8_t u8Tmp; \
1248 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1249 (a_u16Dst) = u8Tmp; \
1250 } while (0)
1251# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1252 do { \
1253 uint8_t u8Tmp; \
1254 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1255 (a_u32Dst) = u8Tmp; \
1256 } while (0)
1257# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1258 do { \
1259 uint8_t u8Tmp; \
1260 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1261 (a_u64Dst) = u8Tmp; \
1262 } while (0)
1263# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1264 do { \
1265 uint16_t u16Tmp; \
1266 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
1267 (a_u32Dst) = u16Tmp; \
1268 } while (0)
1269# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1270 do { \
1271 uint16_t u16Tmp; \
1272 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
1273 (a_u64Dst) = u16Tmp; \
1274 } while (0)
1275# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1276 do { \
1277 uint32_t u32Tmp; \
1278 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
1279 (a_u64Dst) = u32Tmp; \
1280 } while (0)
1281#else /* IEM_WITH_SETJMP */
1282# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
1283 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1284# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1285 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1286# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1287 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1288# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1289 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1290# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1291 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1292# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1293 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1294
1295# define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16(a_u16Dst, a_GCPtrMem) \
1296 ((a_u16Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1297# define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32(a_u32Dst, a_GCPtrMem) \
1298 ((a_u32Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1299# define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64(a_u64Dst, a_GCPtrMem) \
1300 ((a_u64Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1301# define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32(a_u32Dst, a_GCPtrMem) \
1302 ((a_u32Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1303# define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64(a_u64Dst, a_GCPtrMem) \
1304 ((a_u64Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1305# define IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64(a_u64Dst, a_GCPtrMem) \
1306 ((a_u64Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
1307#endif /* IEM_WITH_SETJMP */
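/* Editor's note, an illustrative sketch (same assumptions as above): the ZX
 * forms are what a MOVZX-style decode uses - fetch narrow, zero-extend into
 * the wider local. */
#if 0
IEM_MC_LOCAL(uint32_t, u32Value);
IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
#endif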
1308
1309#ifndef IEM_WITH_SETJMP
1310# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
1311 do { \
1312 uint8_t u8Tmp; \
1313 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1314 (a_u16Dst) = (int8_t)u8Tmp; \
1315 } while (0)
1316# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1317 do { \
1318 uint8_t u8Tmp; \
1319 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1320 (a_u32Dst) = (int8_t)u8Tmp; \
1321 } while (0)
1322# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1323 do { \
1324 uint8_t u8Tmp; \
1325 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1326 (a_u64Dst) = (int8_t)u8Tmp; \
1327 } while (0)
1328# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1329 do { \
1330 uint16_t u16Tmp; \
1331 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
1332 (a_u32Dst) = (int16_t)u16Tmp; \
1333 } while (0)
1334# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1335 do { \
1336 uint16_t u16Tmp; \
1337 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
1338 (a_u64Dst) = (int16_t)u16Tmp; \
1339 } while (0)
1340# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1341 do { \
1342 uint32_t u32Tmp; \
1343 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
1344 (a_u64Dst) = (int32_t)u32Tmp; \
1345 } while (0)
1346#else /* IEM_WITH_SETJMP */
1347# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
1348 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1349# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1350 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1351# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1352 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1353# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1354 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1355# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1356 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1357# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1358 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1359
1360# define IEM_MC_FETCH_MEM_FLAT_U8_SX_U16(a_u16Dst, a_GCPtrMem) \
1361 ((a_u16Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1362# define IEM_MC_FETCH_MEM_FLAT_U8_SX_U32(a_u32Dst, a_GCPtrMem) \
1363 ((a_u32Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1364# define IEM_MC_FETCH_MEM_FLAT_U8_SX_U64(a_u64Dst, a_GCPtrMem) \
1365 ((a_u64Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1366# define IEM_MC_FETCH_MEM_FLAT_U16_SX_U32(a_u32Dst, a_GCPtrMem) \
1367 ((a_u32Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1368# define IEM_MC_FETCH_MEM_FLAT_U16_SX_U64(a_u64Dst, a_GCPtrMem) \
1369 ((a_u64Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1370# define IEM_MC_FETCH_MEM_FLAT_U32_SX_U64(a_u64Dst, a_GCPtrMem) \
1371 ((a_u64Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
1372#endif /* IEM_WITH_SETJMP */
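/* Editor's note, an illustrative sketch (same assumptions as above): the SX
 * forms keep the destination unsigned and hide the signed cast in the macro,
 * as a MOVSX/MOVSXD-style decode needs. */
#if 0
IEM_MC_LOCAL(uint64_t, u64Value);
IEM_MC_FETCH_MEM_U32_SX_U64(u64Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
#endif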
1373
1374#ifndef IEM_WITH_SETJMP
1375# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
1376 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
1377# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
1378 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
1379# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
1380 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
1381# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
1382 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
1383#else
1384# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
1385 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
1386# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
1387 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
1388# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
1389 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
1390# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
1391 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
1392
1393# define IEM_MC_STORE_MEM_FLAT_U8(a_GCPtrMem, a_u8Value) \
1394 iemMemFlatStoreDataU8Jmp(pVCpu, (a_GCPtrMem), (a_u8Value))
1395# define IEM_MC_STORE_MEM_FLAT_U16(a_GCPtrMem, a_u16Value) \
1396 iemMemFlatStoreDataU16Jmp(pVCpu, (a_GCPtrMem), (a_u16Value))
1397# define IEM_MC_STORE_MEM_FLAT_U32(a_GCPtrMem, a_u32Value) \
1398 iemMemFlatStoreDataU32Jmp(pVCpu, (a_GCPtrMem), (a_u32Value))
1399# define IEM_MC_STORE_MEM_FLAT_U64(a_GCPtrMem, a_u64Value) \
1400 iemMemFlatStoreDataU64Jmp(pVCpu, (a_GCPtrMem), (a_u64Value))
1401#endif
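/* Editor's note, an illustrative sketch (same assumptions as above, with a
 * made-up GCPtrEffDst/u32Value): stores mirror the fetches but take the value
 * directly. */
#if 0
IEM_MC_STORE_MEM_U32(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u32Value);
#endif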
1402
1403#ifndef IEM_WITH_SETJMP
1404# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
1405 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
1406# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
1407 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
1408# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
1409 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
1410# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
1411 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
1412#else
1413# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
1414 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
1415# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
1416 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
1417# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
1418 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
1419# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
1420 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
1421
1422# define IEM_MC_STORE_MEM_FLAT_U8_CONST(a_GCPtrMem, a_u8C) \
1423 iemMemFlatStoreDataU8Jmp(pVCpu, (a_GCPtrMem), (a_u8C))
1424# define IEM_MC_STORE_MEM_FLAT_U16_CONST(a_GCPtrMem, a_u16C) \
1425 iemMemFlatStoreDataU16Jmp(pVCpu, (a_GCPtrMem), (a_u16C))
1426# define IEM_MC_STORE_MEM_FLAT_U32_CONST(a_GCPtrMem, a_u32C) \
1427 iemMemFlatStoreDataU32Jmp(pVCpu, (a_GCPtrMem), (a_u32C))
1428# define IEM_MC_STORE_MEM_FLAT_U64_CONST(a_GCPtrMem, a_u64C) \
1429 iemMemFlatStoreDataU64Jmp(pVCpu, (a_GCPtrMem), (a_u64C))
1430#endif
1431
1432#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
1433#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
1434#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
1435#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
1436#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u = UINT32_C(0xffc00000)
1437#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->u = UINT64_C(0xfff8000000000000)
1438#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
1439 do { \
1440 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
1441 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
1442 } while (0)
1443#define IEM_MC_STORE_MEM_INDEF_D80_BY_REF(a_pd80Dst) \
1444 do { \
1445 (a_pd80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
1446 (a_pd80Dst)->au16[4] = UINT16_C(0xffff); \
1447 } while (0)
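/* Editor's note, an illustrative sketch: the *_BY_REF stores above write the
 * default QNaN/indefinite patterns through a pointer obtained from a
 * write-only memory mapping, e.g. on an x87/SSE exception path.  The R32
 * mapping macro named here is an assumption modelled on the U8/U16/U32
 * mapping macros further down; the other names are made up. */
#if 0
uint8_t     bUnmapInfo;
PRTFLOAT32U pr32Dst;
IEM_MC_MEM_MAP_R32_WO(pr32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(pr32Dst);
IEM_MC_MEM_COMMIT_AND_UNMAP_WO(bUnmapInfo);
#endif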
1448
1449#ifndef IEM_WITH_SETJMP
1450# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
1451 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value)))
1452# define IEM_MC_STORE_MEM_U128_NO_AC(a_iSeg, a_GCPtrMem, a_u128Value) \
1453 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128NoAc(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value)))
1454# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
1455 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
1456#else
1457# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
1458 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value))
1459# define IEM_MC_STORE_MEM_U128_NO_AC(a_iSeg, a_GCPtrMem, a_u128Value) \
1460 iemMemStoreDataU128NoAcJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value))
1461# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
1462 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
1463
1464# define IEM_MC_STORE_MEM_FLAT_U128(a_GCPtrMem, a_u128Value) \
1465 iemMemFlatStoreDataU128Jmp(pVCpu, (a_GCPtrMem), &(a_u128Value))
1466# define IEM_MC_STORE_MEM_FLAT_U128_NO_AC(a_GCPtrMem, a_u128Value) \
1467 iemMemFlatStoreDataU128NoAcJmp(pVCpu, (a_GCPtrMem), &(a_u128Value))
1468# define IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE(a_GCPtrMem, a_u128Value) \
1469 iemMemStoreDataU128AlignedSseJmp(pVCpu, UINT8_MAX, (a_GCPtrMem), (a_u128Value))
1470#endif
1471
1472#ifndef IEM_WITH_SETJMP
1473# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
1474 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
1475# define IEM_MC_STORE_MEM_U256_NO_AC(a_iSeg, a_GCPtrMem, a_u256Value) \
1476 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256NoAc(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
1477# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
1478 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
1479#else
1480# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
1481 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
1482# define IEM_MC_STORE_MEM_U256_NO_AC(a_iSeg, a_GCPtrMem, a_u256Value) \
1483 iemMemStoreDataU256NoAcJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
1484# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
1485 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
1486
1487# define IEM_MC_STORE_MEM_FLAT_U256(a_GCPtrMem, a_u256Value) \
1488 iemMemFlatStoreDataU256Jmp(pVCpu, (a_GCPtrMem), &(a_u256Value))
1489# define IEM_MC_STORE_MEM_FLAT_U256_NO_AC(a_GCPtrMem, a_u256Value) \
1490 iemMemFlatStoreDataU256NoAcJmp(pVCpu, (a_GCPtrMem), &(a_u256Value))
1491# define IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX(a_GCPtrMem, a_u256Value) \
1492 iemMemFlatStoreDataU256AlignedAvxJmp(pVCpu, (a_GCPtrMem), &(a_u256Value))
1493#endif
1494
1495/* Regular stack push and pop: */
1496#ifndef IEM_WITH_SETJMP
1497# define IEM_MC_PUSH_U16(a_u16Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
1498# define IEM_MC_PUSH_U32(a_u32Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
1499# define IEM_MC_PUSH_U32_SREG(a_uSegVal) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_uSegVal)))
1500# define IEM_MC_PUSH_U64(a_u64Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
1501
1502# define IEM_MC_POP_GREG_U16(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU16(pVCpu, (a_iGReg)))
1503# define IEM_MC_POP_GREG_U32(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU32(pVCpu, (a_iGReg)))
1504# define IEM_MC_POP_GREG_U64(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU64(pVCpu, (a_iGReg)))
1505#else
1506# define IEM_MC_PUSH_U16(a_u16Value) iemMemStackPushU16Jmp(pVCpu, (a_u16Value))
1507# define IEM_MC_PUSH_U32(a_u32Value) iemMemStackPushU32Jmp(pVCpu, (a_u32Value))
1508# define IEM_MC_PUSH_U32_SREG(a_uSegVal) iemMemStackPushU32SRegJmp(pVCpu, (a_uSegVal))
1509# define IEM_MC_PUSH_U64(a_u64Value) iemMemStackPushU64Jmp(pVCpu, (a_u64Value))
1510
1511# define IEM_MC_POP_GREG_U16(a_iGReg) iemMemStackPopGRegU16Jmp(pVCpu, (a_iGReg))
1512# define IEM_MC_POP_GREG_U32(a_iGReg) iemMemStackPopGRegU32Jmp(pVCpu, (a_iGReg))
1513# define IEM_MC_POP_GREG_U64(a_iGReg) iemMemStackPopGRegU64Jmp(pVCpu, (a_iGReg))
1514#endif
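/* Editor's note, an illustrative sketch (X86_GREG_xAX is the usual VBox
 * general-register index, assumed from x86.h; the value local is made up):
 * PUSH/POP style usage of the stack helpers above. */
#if 0
IEM_MC_PUSH_U32(u32Value);          /* e.g. PUSH r/m32 */
IEM_MC_POP_GREG_U32(X86_GREG_xAX);  /* e.g. POP eAX */
#endif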
1515
1516/* 32-bit flat stack push and pop: */
1517#ifndef IEM_WITH_SETJMP
1518# define IEM_MC_FLAT32_PUSH_U16(a_u16Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
1519# define IEM_MC_FLAT32_PUSH_U32(a_u32Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
1520# define IEM_MC_FLAT32_PUSH_U32_SREG(a_uSegVal) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_uSegVal)))
1521
1522# define IEM_MC_FLAT32_POP_GREG_U16(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU16(pVCpu, (a_iGReg)))
1523# define IEM_MC_FLAT32_POP_GREG_U32(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU32(pVCpu, (a_iGReg)))
1524#else
1525# define IEM_MC_FLAT32_PUSH_U16(a_u16Value) iemMemFlat32StackPushU16Jmp(pVCpu, (a_u16Value))
1526# define IEM_MC_FLAT32_PUSH_U32(a_u32Value) iemMemFlat32StackPushU32Jmp(pVCpu, (a_u32Value))
1527# define IEM_MC_FLAT32_PUSH_U32_SREG(a_uSegVal) iemMemFlat32StackPushU32SRegJmp(pVCpu, (a_uSegVal))
1528
1529# define IEM_MC_FLAT32_POP_GREG_U16(a_iGReg) iemMemFlat32StackPopGRegU16Jmp(pVCpu, (a_iGReg))
1530# define IEM_MC_FLAT32_POP_GREG_U32(a_iGReg) iemMemFlat32StackPopGRegU32Jmp(pVCpu, (a_iGReg))
1531#endif
1532
1533/* 64-bit flat stack push and pop: */
1534#ifndef IEM_WITH_SETJMP
1535# define IEM_MC_FLAT64_PUSH_U16(a_u16Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
1536# define IEM_MC_FLAT64_PUSH_U64(a_u64Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
1537
1538# define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU16(pVCpu, (a_iGReg)))
1539# define IEM_MC_FLAT64_POP_GREG_U64(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU64(pVCpu, (a_iGReg)))
1540#else
1541# define IEM_MC_FLAT64_PUSH_U16(a_u16Value) iemMemFlat64StackPushU16Jmp(pVCpu, (a_u16Value))
1542# define IEM_MC_FLAT64_PUSH_U64(a_u64Value) iemMemFlat64StackPushU64Jmp(pVCpu, (a_u64Value))
1543
1544# define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg) iemMemFlat64StackPopGRegU16Jmp(pVCpu, (a_iGReg))
1545# define IEM_MC_FLAT64_POP_GREG_U64(a_iGReg) iemMemFlat64StackPopGRegU64Jmp(pVCpu, (a_iGReg))
1546#endif
1547
1548
1549/* 8-bit */
1550
1551/**
1552 * Maps guest memory for byte atomic read+write direct (or bounce) buffer
1553 * access.
1554 *
1555 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1556 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1557 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1558 * @param[in] a_GCPtrMem The memory address.
1559 * @remarks Will return/long jump on errors.
1560 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1561 */
1562#ifndef IEM_WITH_SETJMP
1563# define IEM_MC_MEM_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1564 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \
1565 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, 0))
1566#else
1567# define IEM_MC_MEM_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1568 (a_pu8Mem) = iemMemMapDataU8AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1569#endif
1570
1571/**
1572 * Maps guest memory for byte read+write direct (or bounce) buffer access.
1573 *
1574 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1575 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1576 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1577 * @param[in] a_GCPtrMem The memory address.
1578 * @remarks Will return/long jump on errors.
1579 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1580 */
1581#ifndef IEM_WITH_SETJMP
1582# define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1583 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \
1584 (a_GCPtrMem), IEM_ACCESS_DATA_RW, 0))
1585#else
1586# define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1587 (a_pu8Mem) = iemMemMapDataU8RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1588#endif
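/* Editor's note, an illustrative sketch of the map/modify/commit pattern the
 * RW mapping serves (memory-destination instructions).  Real MC blocks
 * dispatch the modification to an ALU worker via IEM_MC_REF_xxx and
 * IEM_MC_CALL_xxx macros; the direct OR below is a stand-in.  Names are made
 * up, the commit macro is taken from the @see reference above. */
#if 0
uint8_t  bUnmapInfo;
uint8_t *pu8Dst;
IEM_MC_MEM_MAP_U8_RW(pu8Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
*pu8Dst |= u8Src;                           /* stand-in for the ALU worker */
IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo);
#endif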
1589
1590/**
1591 * Maps guest memory for byte writeonly direct (or bounce) buffer access.
1592 *
1593 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1594 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1595 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1596 * @param[in] a_GCPtrMem The memory address.
1597 * @remarks Will return/long jump on errors.
1598 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1599 */
1600#ifndef IEM_WITH_SETJMP
1601# define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1602 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \
1603 (a_GCPtrMem), IEM_ACCESS_DATA_W, 0))
1604#else
1605# define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1606 (a_pu8Mem) = iemMemMapDataU8WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1607#endif
1608
1609/**
1610 * Maps guest memory for byte readonly direct (or bounce) buffer access.
1611 *
1612 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1613 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1614 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1615 * @param[in] a_GCPtrMem The memory address.
1616 * @remarks Will return/long jump on errors.
1617 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1618 */
1619#ifndef IEM_WITH_SETJMP
1620# define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1621 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \
1622 (a_GCPtrMem), IEM_ACCESS_DATA_R, 0))
1623#else
1624# define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1625 (a_pu8Mem) = iemMemMapDataU8RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1626#endif
1627
1628/**
1629 * Maps guest memory for byte atomic read+write direct (or bounce) buffer
1630 * access, flat address variant.
1631 *
1632 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1633 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1634 * @param[in] a_GCPtrMem The memory address.
1635 * @remarks Will return/long jump on errors.
1636 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1637 */
1638#ifndef IEM_WITH_SETJMP
1639# define IEM_MC_MEM_FLAT_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1640 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \
1641 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, 0))
1642#else
1643# define IEM_MC_MEM_FLAT_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1644 (a_pu8Mem) = iemMemFlatMapDataU8AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1645#endif
1646
1647/**
1648 * Maps guest memory for byte read+write direct (or bounce) buffer access, flat
1649 * address variant.
1650 *
1651 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1652 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1653 * @param[in] a_GCPtrMem The memory address.
1654 * @remarks Will return/long jump on errors.
1655 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1656 */
1657#ifndef IEM_WITH_SETJMP
1658# define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1659 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \
1660 (a_GCPtrMem), IEM_ACCESS_DATA_RW, 0))
1661#else
1662# define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1663 (a_pu8Mem) = iemMemFlatMapDataU8RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1664#endif
1665
1666/**
1667 * Maps guest memory for byte writeonly direct (or bounce) buffer access, flat
1668 * address variant.
1669 *
1670 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1671 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1672 * @param[in] a_GCPtrMem The memory address.
1673 * @remarks Will return/long jump on errors.
1674 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1675 */
1676#ifndef IEM_WITH_SETJMP
1677# define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1678 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \
1679 (a_GCPtrMem), IEM_ACCESS_DATA_W, 0))
1680#else
1681# define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1682 (a_pu8Mem) = iemMemFlatMapDataU8WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1683#endif
1684
1685/**
1686 * Maps guest memory for byte readonly direct (or bounce) buffer access, flat
1687 * address variant.
1688 *
1689 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1690 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1691 * @param[in] a_GCPtrMem The memory address.
1692 * @remarks Will return/long jump on errors.
1693 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1694 */
1695#ifndef IEM_WITH_SETJMP
1696# define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1697 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \
1698 (a_GCPtrMem), IEM_ACCESS_DATA_R, 0))
1699#else
1700# define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1701 (a_pu8Mem) = iemMemFlatMapDataU8RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1702#endif
1703
1704
1705/* 16-bit */
1706
1707/**
1708 * Maps guest memory for word atomic read+write direct (or bounce) buffer access.
1709 *
1710 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1711 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1712 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1713 * @param[in] a_GCPtrMem The memory address.
1714 * @remarks Will return/long jump on errors.
1715 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1716 */
1717#ifndef IEM_WITH_SETJMP
1718# define IEM_MC_MEM_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1719 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \
1720 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint16_t) - 1))
1721#else
1722# define IEM_MC_MEM_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1723 (a_pu16Mem) = iemMemMapDataU16AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1724#endif
1725
1726/**
1727 * Maps guest memory for word read+write direct (or bounce) buffer access.
1728 *
1729 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1730 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1731 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1732 * @param[in] a_GCPtrMem The memory address.
1733 * @remarks Will return/long jump on errors.
1734 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1735 */
1736#ifndef IEM_WITH_SETJMP
1737# define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1738 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \
1739 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1))
1740#else
1741# define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1742 (a_pu16Mem) = iemMemMapDataU16RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1743#endif
1744
1745/**
1746 * Maps guest memory for word writeonly direct (or bounce) buffer access.
1747 *
1748 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1749 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1750 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1751 * @param[in] a_GCPtrMem The memory address.
1752 * @remarks Will return/long jump on errors.
1753 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1754 */
1755#ifndef IEM_WITH_SETJMP
1756# define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1757 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \
1758 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1))
1759#else
1760# define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1761 (a_pu16Mem) = iemMemMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1762#endif
1763
1764/**
1765 * Maps guest memory for word readonly direct (or bounce) buffer access.
1766 *
1767 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1768 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1769 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1770 * @param[in] a_GCPtrMem The memory address.
1771 * @remarks Will return/long jump on errors.
1772 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1773 */
1774#ifndef IEM_WITH_SETJMP
1775# define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1776 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \
1777 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1))
1778#else
1779# define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1780 (a_pu16Mem) = iemMemMapDataU16RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1781#endif
1782
1783/**
1784 * Maps guest memory for word atomic read+write direct (or bounce) buffer
1785 * access, flat address variant.
1786 *
1787 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1788 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1789 * @param[in] a_GCPtrMem The memory address.
1790 * @remarks Will return/long jump on errors.
1791 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1792 */
1793#ifndef IEM_WITH_SETJMP
1794# define IEM_MC_MEM_FLAT_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1795 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \
1796 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint16_t) - 1))
1797#else
1798# define IEM_MC_MEM_FLAT_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1799 (a_pu16Mem) = iemMemFlatMapDataU16AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1800#endif
1801
1802/**
1803 * Maps guest memory for word read+write direct (or bounce) buffer access, flat
1804 * address variant.
1805 *
1806 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1807 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1808 * @param[in] a_GCPtrMem The memory address.
1809 * @remarks Will return/long jump on errors.
1810 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1811 */
1812#ifndef IEM_WITH_SETJMP
1813# define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1814 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \
1815 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1))
1816#else
1817# define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1818 (a_pu16Mem) = iemMemFlatMapDataU16RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1819#endif
1820
1821/**
1822 * Maps guest memory for word writeonly direct (or bounce) buffer access, flat
1823 * address variant.
1824 *
1825 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1826 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1827 * @param[in] a_GCPtrMem The memory address.
1828 * @remarks Will return/long jump on errors.
1829 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1830 */
1831#ifndef IEM_WITH_SETJMP
1832# define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1833 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \
1834 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1))
1835#else
1836# define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1837 (a_pu16Mem) = iemMemFlatMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1838#endif
1839
1840/**
1841 * Maps guest memory for word readonly direct (or bounce) buffer access, flat
1842 * address variant.
1843 *
1844 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1845 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1846 * @param[in] a_GCPtrMem The memory address.
1847 * @remarks Will return/long jump on errors.
1848 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1849 */
1850#ifndef IEM_WITH_SETJMP
1851# define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1852 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \
1853 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1))
1854#else
1855# define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1856 (a_pu16Mem) = iemMemFlatMapDataU16RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1857#endif
1858
1859/** int16_t alias. */
1860#ifndef IEM_WITH_SETJMP
1861# define IEM_MC_MEM_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1862 IEM_MC_MEM_MAP_U16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
1863#else
1864# define IEM_MC_MEM_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1865 (a_pi16Mem) = (int16_t *)iemMemMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1866#endif
1867
1868/** Flat int16_t alias. */
1869#ifndef IEM_WITH_SETJMP
1870# define IEM_MC_MEM_FLAT_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem) \
1871 IEM_MC_MEM_FLAT_MAP_U16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem)
1872#else
1873# define IEM_MC_MEM_FLAT_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem) \
1874 (a_pi16Mem) = (int16_t *)iemMemFlatMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1875#endif
1876
1877
1878/* 32-bit */
1879
1880/**
1881 * Maps guest memory for dword atomic read+write direct (or bounce) buffer access.
1882 *
1883 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1884 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1885 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1886 * @param[in] a_GCPtrMem The memory address.
1887 * @remarks Will return/long jump on errors.
1888 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1889 */
1890#ifndef IEM_WITH_SETJMP
1891# define IEM_MC_MEM_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1892 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \
1893 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint32_t) - 1))
1894#else
1895# define IEM_MC_MEM_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1896 (a_pu32Mem) = iemMemMapDataU32AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1897#endif
1898
1899/**
1900 * Maps guest memory for dword read+write direct (or bounce) buffer access.
1901 *
1902 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1903 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1904 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1905 * @param[in] a_GCPtrMem The memory address.
1906 * @remarks Will return/long jump on errors.
1907 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1908 */
1909#ifndef IEM_WITH_SETJMP
1910# define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1911 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \
1912 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1))
1913#else
1914# define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1915 (a_pu32Mem) = iemMemMapDataU32RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1916#endif
1917
1918/**
1919 * Maps guest memory for dword writeonly direct (or bounce) buffer access.
1920 *
1921 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1922 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1923 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1924 * @param[in] a_GCPtrMem The memory address.
1925 * @remarks Will return/long jump on errors.
1926 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1927 */
1928#ifndef IEM_WITH_SETJMP
1929# define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1930 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \
1931 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1))
1932#else
1933# define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1934 (a_pu32Mem) = iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1935#endif
1936
1937/**
1938 * Maps guest memory for dword readonly direct (or bounce) buffer access.
1939 *
1940 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1941 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1942 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1943 * @param[in] a_GCPtrMem The memory address.
1944 * @remarks Will return/long jump on errors.
1945 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1946 */
1947#ifndef IEM_WITH_SETJMP
1948# define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1949 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \
1950 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1))
1951#else
1952# define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1953 (a_pu32Mem) = iemMemMapDataU32RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1954#endif
1955
1956/**
1957 * Maps guest memory for dword atomic read+write direct (or bounce) buffer
1958 * access, flat address variant.
1959 *
1960 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1961 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1962 * @param[in] a_GCPtrMem The memory address.
1963 * @remarks Will return/long jump on errors.
1964 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1965 */
1966#ifndef IEM_WITH_SETJMP
1967# define IEM_MC_MEM_FLAT_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
1968 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \
1969 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint32_t) - 1))
1970#else
1971# define IEM_MC_MEM_FLAT_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
1972 (a_pu32Mem) = iemMemFlatMapDataU32AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1973#endif
1974
1975/**
1976 * Maps guest memory for dword read+write direct (or bounce) buffer access,
1977 * flat address variant.
1978 *
1979 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1980 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1981 * @param[in] a_GCPtrMem The memory address.
1982 * @remarks Will return/long jump on errors.
1983 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1984 */
1985#ifndef IEM_WITH_SETJMP
1986# define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
1987 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \
1988 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1))
1989#else
1990# define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
1991 (a_pu32Mem) = iemMemFlatMapDataU32RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1992#endif
1993
1994/**
1995 * Maps guest memory for dword writeonly direct (or bounce) buffer access, flat
1996 * address variant.
1997 *
1998 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1999 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2000 * @param[in] a_GCPtrMem The memory address.
2001 * @remarks Will return/long jump on errors.
2002 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2003 */
2004#ifndef IEM_WITH_SETJMP
2005# define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
2006 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \
2007 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1))
2008#else
2009# define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
2010 (a_pu32Mem) = iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2011#endif
2012
2013/**
2014 * Maps guest memory for dword readonly direct (or bounce) buffer access, flat
2015 * address variant.
2016 *
2017 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
2018 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2019 * @param[in] a_GCPtrMem The memory address.
2020 * @remarks Will return/long jump on errors.
2021 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
2022 */
2023#ifndef IEM_WITH_SETJMP
2024# define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
2025 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \
2026 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1))
2027#else
2028# define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
2029 (a_pu32Mem) = iemMemFlatMapDataU32RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2030#endif
2031
2032/** int32_t alias. */
2033#ifndef IEM_WITH_SETJMP
2034# define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2035 IEM_MC_MEM_MAP_U32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
2036#else
2037# define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2038 (a_pi32Mem) = (int32_t *)iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2039#endif
2040
2041/** Flat int32_t alias. */
2042#ifndef IEM_WITH_SETJMP
2043# define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \
2044 IEM_MC_MEM_FLAT_MAP_U32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem)
2045#else
2046# define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \
2047 (a_pi32Mem) = (int32_t *)iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2048#endif
2049
2050/** RTFLOAT32U alias. */
2051#ifndef IEM_WITH_SETJMP
2052# define IEM_MC_MEM_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2053 IEM_MC_MEM_MAP_U32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
2054#else
2055# define IEM_MC_MEM_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2056 (a_pr32Mem) = (PRTFLOAT32U)iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2057#endif
2058
2059/** Flat RTFLOAT32U alias. */
2060#ifndef IEM_WITH_SETJMP
2061# define IEM_MC_MEM_FLAT_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) \
2062 IEM_MC_MEM_FLAT_MAP_U32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem)
2063#else
2064# define IEM_MC_MEM_FLAT_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) \
2065 (a_pr32Mem) = (PRTFLOAT32U)iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2066#endif
2067
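/*
 * Illustrative sketch (hypothetical names, not from this file): a dword
 * read+write mapping as the destination of a memory ALU form, where the
 * mapped pointer is handed to an assembly worker before the commit.
 */
#if 0
IEM_MC_LOCAL(uint8_t, bUnmapInfo);
IEM_MC_ARG(uint32_t *, pu32Dst, 0);
IEM_MC_MEM_MAP_U32_RW(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
IEM_MC_ARG(uint32_t, u32Src, 1);
IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo);
#endif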
2068
2069/* 64-bit */
2070
2071/**
2072 * Maps guest memory for qword atomic read+write direct (or bounce) buffer access.
2073 *
2074 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2075 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2076 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2077 * @param[in] a_GCPtrMem The memory address.
2078 * @remarks Will return/long jump on errors.
2079 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
2080 */
2081#ifndef IEM_WITH_SETJMP
2082# define IEM_MC_MEM_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2083 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \
2084 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint64_t) - 1))
2085#else
2086# define IEM_MC_MEM_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2087 (a_pu64Mem) = iemMemMapDataU64AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2088#endif
2089
2090/**
2091 * Maps guest memory for qword read+write direct (or bounce) buffer access.
2092 *
2093 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2094 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2095 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2096 * @param[in] a_GCPtrMem The memory address.
2097 * @remarks Will return/long jump on errors.
2098 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
2099 */
2100#ifndef IEM_WITH_SETJMP
2101# define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2102 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \
2103 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1))
2104#else
2105# define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2106 (a_pu64Mem) = iemMemMapDataU64RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2107#endif
2108
2109/**
2110 * Maps guest memory for qword writeonly direct (or bounce) buffer access.
2111 *
2112 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2113 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2114 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2115 * @param[in] a_GCPtrMem The memory address.
2116 * @remarks Will return/long jump on errors.
2117 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2118 */
2119#ifndef IEM_WITH_SETJMP
2120# define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2121 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \
2122 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
2123#else
2124# define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2125 (a_pu64Mem) = iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2126#endif
2127
2128/**
2129 * Maps guest memory for qword readonly direct (or bounce) buffer access.
2130 *
2131 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2132 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2133 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2134 * @param[in] a_GCPtrMem The memory address.
2135 * @remarks Will return/long jump on errors.
2136 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
2137 */
2138#ifndef IEM_WITH_SETJMP
2139# define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2140 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \
2141 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1))
2142#else
2143# define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2144 (a_pu64Mem) = iemMemMapDataU64RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2145#endif
2146
2147/**
2148 * Maps guest memory for qword atomic read+write direct (or bounce) buffer
2149 * access, flat address variant.
2150 *
2151 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2152 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2153 * @param[in] a_GCPtrMem The memory address.
2154 * @remarks Will return/long jump on errors.
2155 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
2156 */
2157#ifndef IEM_WITH_SETJMP
2158# define IEM_MC_MEM_FLAT_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2159 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \
2160 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint64_t) - 1))
2161#else
2162# define IEM_MC_MEM_FLAT_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2163 (a_pu64Mem) = iemMemFlatMapDataU64AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2164#endif
2165
2166/**
2167 * Maps guest memory for qword read+write direct (or bounce) buffer access,
2168 * flat address variant.
2169 *
2170 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2171 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2172 * @param[in] a_GCPtrMem The memory address.
2173 * @remarks Will return/long jump on errors.
2174 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
2175 */
2176#ifndef IEM_WITH_SETJMP
2177# define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2178 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \
2179 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1))
2180#else
2181# define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2182 (a_pu64Mem) = iemMemFlatMapDataU64RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2183#endif
2184
2185/**
2186 * Maps guest memory for qword writeonly direct (or bounce) buffer access, flat
2187 * address variant.
2188 *
2189 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2190 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2191 * @param[in] a_GCPtrMem The memory address.
2192 * @remarks Will return/long jump on errors.
2193 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2194 */
2195#ifndef IEM_WITH_SETJMP
2196# define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2197 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \
2198 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
2199#else
2200# define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2201 (a_pu64Mem) = iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2202#endif
2203
2204/**
2205 * Maps guest memory for qword readonly direct (or bounce) buffer access, flat
2206 * address variant.
2207 *
2208 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2209 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2210 * @param[in] a_GCPtrMem The memory address.
2211 * @remarks Will return/long jump on errors.
2212 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
2213 */
2214#ifndef IEM_WITH_SETJMP
2215# define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2216 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \
2217 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1))
2218#else
2219# define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2220 (a_pu64Mem) = iemMemFlatMapDataU64RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2221#endif
2222
2223/** int64_t alias. */
2224#ifndef IEM_WITH_SETJMP
2225# define IEM_MC_MEM_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2226 IEM_MC_MEM_MAP_U64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
2227#else
2228# define IEM_MC_MEM_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2229 (a_pi64Mem) = (int64_t *)iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2230#endif
2231
2232/** Flat int64_t alias. */
2233#ifndef IEM_WITH_SETJMP
2234# define IEM_MC_MEM_FLAT_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem) \
2235 IEM_MC_MEM_FLAT_MAP_U64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem)
2236#else
2237# define IEM_MC_MEM_FLAT_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem) \
2238 (a_pi64Mem) = (int64_t *)iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2239#endif
2240
2241/** RTFLOAT64U alias. */
2242#ifndef IEM_WITH_SETJMP
2243# define IEM_MC_MEM_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2244 IEM_MC_MEM_MAP_U64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
2245#else
2246# define IEM_MC_MEM_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2247 (a_pr64Mem) = (PRTFLOAT64U)iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2248#endif
2249
2250/** Flat RTFLOAT64U alias. */
2251#ifndef IEM_WITH_SETJMP
2252# define IEM_MC_MEM_FLAT_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_GCPtrMem) \
2253 IEM_MC_MEM_FLAT_MAP_U64_WO(a_pr64Mem, a_bUnmapInfo, a_GCPtrMem)
2254#else
2255# define IEM_MC_MEM_FLAT_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_GCPtrMem) \
2256 (a_pr64Mem) = (PRTFLOAT64U)iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2257#endif
2258
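/*
 * Illustrative sketch: the _ATOMIC variants back LOCK-prefixed forms.  The
 * mapping itself looks like _RW; the difference is the atomic access mode
 * requested from iemMemMap and the matching commit macro.  Hypothetical
 * names below.
 */
#if 0
IEM_MC_MEM_MAP_U64_ATOMIC(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
/* ... locked read-modify-write on *pu64Dst via an assembly worker ... */
IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo);
#endif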
2259
2260/* 128-bit */
2261
2262/**
2263 * Maps guest memory for dqword atomic read+write direct (or bounce) buffer access.
2264 *
2265 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2266 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2267 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2268 * @param[in] a_GCPtrMem The memory address.
2269 * @remarks Will return/long jump on errors.
2270 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
2271 */
2272#ifndef IEM_WITH_SETJMP
2273# define IEM_MC_MEM_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2274 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), (a_iSeg), \
2275 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(RTUINT128U) - 1))
2276#else
2277# define IEM_MC_MEM_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2278 (a_pu128Mem) = iemMemMapDataU128AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2279#endif
2280
2281/**
2282 * Maps guest memory for dqword read+write direct (or bounce) buffer access.
2283 *
2284 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2285 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2286 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2287 * @param[in] a_GCPtrMem The memory address.
2288 * @remarks Will return/long jump on errors.
2289 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
2290 */
2291#ifndef IEM_WITH_SETJMP
2292# define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2293 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), (a_iSeg), \
2294 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(RTUINT128U) - 1))
2295#else
2296# define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2297 (a_pu128Mem) = iemMemMapDataU128RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2298#endif
2299
2300/**
2301 * Maps guest memory for dqword writeonly direct (or bounce) buffer access.
2302 *
2303 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2304 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2305 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2306 * @param[in] a_GCPtrMem The memory address.
2307 * @remarks Will return/long jump on errors.
2308 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2309 */
2310#ifndef IEM_WITH_SETJMP
2311# define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2312 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), (a_iSeg), \
2313 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(RTUINT128U) - 1))
2314#else
2315# define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2316 (a_pu128Mem) = iemMemMapDataU128WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2317#endif
2318
2319/**
2320 * Maps guest memory for dqword readonly direct (or bounce) buffer access.
2321 *
2322 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2323 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2324 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2325 * @param[in] a_GCPtrMem The memory address.
2326 * @remarks Will return/long jump on errors.
2327 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
2328 */
2329#ifndef IEM_WITH_SETJMP
2330# define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2331 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), (a_iSeg), \
2332 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(RTUINT128U) - 1))
2333#else
2334# define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2335 (a_pu128Mem) = iemMemMapDataU128RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2336#endif
2337
2338/**
2339 * Maps guest memory for dqword atomic read+write direct (or bounce) buffer
2340 * access, flat address variant.
2341 *
2342 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2343 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2344 * @param[in] a_GCPtrMem The memory address.
2345 * @remarks Will return/long jump on errors.
2346 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
2347 */
2348#ifndef IEM_WITH_SETJMP
2349# define IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2350 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), UINT8_MAX, \
2351 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(RTUINT128U) - 1))
2352#else
2353# define IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2354 (a_pu128Mem) = iemMemFlatMapDataU128AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2355#endif
2356
2357/**
2358 * Maps guest memory for dqword read+write direct (or bounce) buffer access,
2359 * flat address variant.
2360 *
2361 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2362 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2363 * @param[in] a_GCPtrMem The memory address.
2364 * @remarks Will return/long jump on errors.
2365 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
2366 */
2367#ifndef IEM_WITH_SETJMP
2368# define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2369 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), UINT8_MAX, \
2370 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(RTUINT128U) - 1))
2371#else
2372# define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2373 (a_pu128Mem) = iemMemFlatMapDataU128RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2374#endif
2375
2376/**
2377 * Maps guest memory for dqword writeonly direct (or bounce) buffer access,
2378 * flat address variant.
2379 *
2380 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2381 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2382 * @param[in] a_GCPtrMem The memory address.
2383 * @remarks Will return/long jump on errors.
2384 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2385 */
2386#ifndef IEM_WITH_SETJMP
2387# define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2388 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), UINT8_MAX, \
2389 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(RTUINT128U) - 1))
2390#else
2391# define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2392 (a_pu128Mem) = iemMemFlatMapDataU128WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2393#endif
2394
2395/**
2396 * Maps guest memory for dqword readonly direct (or bounce) buffer access, flat
2397 * address variant.
2398 *
2399 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2400 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2401 * @param[in] a_GCPtrMem The memory address.
2402 * @remarks Will return/long jump on errors.
2403 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
2404 */
2405#ifndef IEM_WITH_SETJMP
2406# define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2407 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), UINT8_MAX, \
2408 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(RTUINT128U) - 1))
2409#else
2410# define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2411 (a_pu128Mem) = iemMemFlatMapDataU128RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2412#endif
2413
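/*
 * Illustrative sketch: a dqword read-only mapping as used for fetching a
 * 16-byte SSE memory operand that is only read (hypothetical names).
 */
#if 0
IEM_MC_MEM_MAP_U128_RO(pu128Src, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
/* ... consume *pu128Src ... */
IEM_MC_MEM_COMMIT_AND_UNMAP_RO(bUnmapInfo);
#endif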
2414
2415/* misc */
2416
2417/**
2418 * Maps guest memory for 80-bit float writeonly direct (or bounce) buffer access.
2419 *
2420 * @param[out] a_pr80Mem Where to return the pointer to the mapping.
2421 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2422 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2423 * @param[in] a_GCPtrMem The memory address.
2424 * @remarks Will return/long jump on errors.
2425 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2426 */
2427#ifndef IEM_WITH_SETJMP
2428# define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2429 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), (a_iSeg), \
2430 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
2431#else
2432# define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2433 (a_pr80Mem) = iemMemMapDataR80WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2434#endif
2435
2436/**
2437 * Maps guest memory for 80-bit float writeonly direct (or bounce) buffer access, flat address variant.
2438 *
2439 * @param[out] a_pr80Mem Where to return the pointer to the mapping.
2440 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2441 * @param[in] a_GCPtrMem The memory address.
2442 * @remarks Will return/long jump on errors.
2443 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2444 */
2445#ifndef IEM_WITH_SETJMP
2446# define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \
2447 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), UINT8_MAX, \
2448 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
2449#else
2450# define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \
2451 (a_pr80Mem) = iemMemFlatMapDataR80WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2452#endif
2453
2454
2455/**
2456 * Maps guest memory for 80-bit BCD writeonly direct (or bounce) buffer access.
2457 *
2458 * @param[out] a_pd80Mem Where to return the pointer to the mapping.
2459 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2460 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2461 * @param[in] a_GCPtrMem The memory address.
2462 * @remarks Will return/long jump on errors.
2463 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2464 */
2465#ifndef IEM_WITH_SETJMP
2466# define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2467 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pd80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), (a_iSeg), \
2468 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
2469#else
2470# define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2471 (a_pd80Mem) = iemMemMapDataD80WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2472#endif
2473
2474/**
2475 * Maps guest memory for 80-bit BCD writeonly direct (or bounce) buffer access, flat address variant.
2476 *
2477 * @param[out] a_pd80Mem Where to return the pointer to the mapping.
2478 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2479 * @param[in] a_GCPtrMem The memory address.
2480 * @remarks Will return/long jump on errors.
2481 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2482 */
2483#ifndef IEM_WITH_SETJMP
2484# define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \
2485 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pd80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), UINT8_MAX, \
2486 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
2487#else
2488# define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \
2489 (a_pd80Mem) = iemMemFlatMapDataD80WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2490#endif
2491
2492
2493
2494/* commit + unmap */
2495
2496/** Commits the memory and unmaps guest memory previously mapped RW.
2497 * @remarks May return.
2498 * @note Implicitly frees the a_bMapInfo variable.
2499 */
2500#ifndef IEM_WITH_SETJMP
2501# define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_bMapInfo) IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo))
2502#else
2503# define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_bMapInfo) iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo))
2504#endif
2505
2506/** Commits the memory and unmaps guest memory previously mapped ATOMIC.
2507 * @remarks May return.
2508 * @note Implicitly frees the a_bMapInfo variable.
2509 */
2510#ifndef IEM_WITH_SETJMP
2511# define IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(a_bMapInfo) IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo))
2512#else
2513# define IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(a_bMapInfo) iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo))
2514#endif
2515
2516/** Commits the memory and unmaps guest memory previously mapped W.
2517 * @remarks May return.
2518 * @note Implicitly frees the a_bMapInfo variable.
2519 */
2520#ifndef IEM_WITH_SETJMP
2521# define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_bMapInfo) IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo))
2522#else
2523# define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_bMapInfo) iemMemCommitAndUnmapWoJmp(pVCpu, (a_bMapInfo))
2524#endif
2525
2526/** Commits the memory and unmaps guest memory previously mapped R.
2527 * @remarks May return.
2528 * @note Implicitly frees the a_bMapInfo variable.
2529 */
2530#ifndef IEM_WITH_SETJMP
2531# define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_bMapInfo) IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo))
2532#else
2533# define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_bMapInfo) iemMemCommitAndUnmapRoJmp(pVCpu, (a_bMapInfo))
2534#endif
2535
2536
2537/** Commits the memory and unmaps the guest memory, unless the FPU status word
2538 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
2539 * would cause FLD not to store.
2540 *
2541 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
2542 * store, while \#P will not.
2543 *
2544 * @remarks May in theory return - for now.
2545 * @note Implicitly frees both the a_bMapInfo and a_u16FSW variables.
2546 */
2547#ifndef IEM_WITH_SETJMP
2548# define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_bMapInfo, a_u16FSW) do { \
2549 if ( !(a_u16FSW & X86_FSW_ES) \
2550 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
2551 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
2552 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo)); \
2553 else \
2554 iemMemRollbackAndUnmap(pVCpu, a_bMapInfo); \
2555 } while (0)
2556#else
2557# define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_bMapInfo, a_u16FSW) do { \
2558 if ( !(a_u16FSW & X86_FSW_ES) \
2559 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
2560 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
2561 iemMemCommitAndUnmapWoJmp(pVCpu, a_bMapInfo); \
2562 else \
2563 iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo); \
2564 } while (0)
2565#endif
2566
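/*
 * Illustrative sketch: how the FPU-store commit above is typically used in
 * an FSTP m80real style sequence - map the destination write-only, let the
 * FPU worker fill it in and produce the FSW, then commit or roll back based
 * on that FSW.  The worker call is elided; all names are hypothetical.
 */
#if 0
IEM_MC_MEM_MAP_R80_WO(pr80Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
/* ... FPU worker writes *pr80Dst and produces u16Fsw ... */
IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(bUnmapInfo, u16Fsw);
#endif
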
2567/** Rolls back (conceptually only, assumes no writes) and unmaps the guest memory.
2568 * @note Implicitly frees the a_bMapInfo variable. */
2569#ifndef IEM_WITH_SETJMP
2570# define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_bMapInfo) iemMemRollbackAndUnmap(pVCpu, a_bMapInfo)
2571#else
2572# define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_bMapInfo) iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo)
2573#endif
2574
2575
2576
2577/** Calculate efficient address from R/M. */
2578#ifndef IEM_WITH_SETJMP
2579# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
2580 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &(a_GCPtrEff)))
2581#else
2582# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
2583 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (a_bRm), (a_cbImmAndRspOffset)))
2584#endif
2585
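/*
 * Illustrative sketch: computing the effective address of a ModR/M memory
 * operand before mapping or fetching it (bRm comes from the decoder; the
 * local name is hypothetical).
 */
#if 0
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0 /*cbImmAndRspOffset*/);
#endif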
2586
2587/** The @a a_fSupportedHosts mask consists of ORed together RT_ARCH_VAL_XXX values. */
2588#define IEM_MC_NATIVE_IF(a_fSupportedHosts) if (false) {
2589#define IEM_MC_NATIVE_ELSE() } else {
2590#define IEM_MC_NATIVE_ENDIF() } ((void)0)
2591
2592#define IEM_MC_NATIVE_EMIT_0(a_fnEmitter)
2593#define IEM_MC_NATIVE_EMIT_1(a_fnEmitter, a0) (void)(a0)
2594#define IEM_MC_NATIVE_EMIT_2(a_fnEmitter, a0, a1) (void)(a0), (void)(a1)
2595#define IEM_MC_NATIVE_EMIT_3(a_fnEmitter, a0, a1, a2) (void)(a0), (void)(a1), (void)(a2)
2596#define IEM_MC_NATIVE_EMIT_4(a_fnEmitter, a0, a1, a2, a3) (void)(a0), (void)(a1), (void)(a2), (void)(a3)
2597#define IEM_MC_NATIVE_EMIT_5(a_fnEmitter, a0, a1, a2, a3, a4) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4)
2598#define IEM_MC_NATIVE_EMIT_6(a_fnEmitter, a0, a1, a2, a3, a4, a5) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5)
2599#define IEM_MC_NATIVE_EMIT_7(a_fnEmitter, a0, a1, a2, a3, a4, a5, a6) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5), (void)(a6)
2600#define IEM_MC_NATIVE_EMIT_8(a_fnEmitter, a0, a1, a2, a3, a4, a5, a6, a7) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5), (void)(a6), (void)(a7)
2601
2602/** This can be used to direct the register allocator when dealing with
2603 * x86/AMD64 instructions (like SHL reg,CL) that take fixed registers. */
2604#define IEM_MC_NATIVE_SET_AMD64_HOST_REG_FOR_LOCAL(a_VarNm, a_idxHostReg) ((void)0)
2605
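/*
 * Illustrative sketch of the native-emit guards: on supported hosts the
 * recompiler substitutes a hand-written emitter, otherwise the generic
 * IEM_MC_* path is taken.  The emitter name below is hypothetical; note
 * that the macros supply the braces themselves.
 */
#if 0
IEM_MC_NATIVE_IF(RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64)
    IEM_MC_NATIVE_SET_AMD64_HOST_REG_FOR_LOCAL(cShiftArg, X86_GREG_xCX);
    IEM_MC_NATIVE_EMIT_2(iemNativeEmit_shl_u32_cl /* hypothetical */, u32Dst, cShiftArg);
IEM_MC_NATIVE_ELSE()
    /* generic IEM_MC_* fallback */
IEM_MC_NATIVE_ENDIF();
#endif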
2606
2607#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
2608#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
2609#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
2610#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
2611#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
2612#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
2613#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
2614
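/*
 * Illustrative sketch: IEM_MC_CALL_AIMPL_3/4 capture the worker's return
 * value, e.g. for workers that can fail; the worker and argument names
 * below are hypothetical.
 */
#if 0
IEM_MC_CALL_AIMPL_3(rcDiv, pfnDivWorker /* hypothetical */, pu32Quotient, pu32Remainder, u32Divisor);
#endif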
2615
2616/** @def IEM_MC_CALL_CIMPL_HLP_RET
2617 * Helper macro for checking that all important IEM_CIMPL_F_XXX bits are set.
2618 */
2619#ifdef VBOX_STRICT
2620# define IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, a_CallExpr) \
2621 do { \
2622 uint8_t const cbInstr = IEM_GET_INSTR_LEN(pVCpu); /* may be flushed */ \
2623 uint16_t const uCsBefore = pVCpu->cpum.GstCtx.cs.Sel; \
2624 uint64_t const uRipBefore = pVCpu->cpum.GstCtx.rip; \
2625 uint32_t const fEflBefore = pVCpu->cpum.GstCtx.eflags.u; \
2626 uint32_t const fExecBefore = pVCpu->iem.s.fExec; \
2627 VBOXSTRICTRC const rcStrictHlp = a_CallExpr; \
2628 if (rcStrictHlp == VINF_SUCCESS) \
2629 { \
2630 AssertMsg( ((a_fFlags) & IEM_CIMPL_F_BRANCH_ANY) \
2631 || ( uRipBefore + cbInstr == pVCpu->cpum.GstCtx.rip \
2632 && uCsBefore == pVCpu->cpum.GstCtx.cs.Sel) \
2633 || ( ((a_fFlags) & IEM_CIMPL_F_REP) \
2634 && uRipBefore == pVCpu->cpum.GstCtx.rip \
2635 && uCsBefore == pVCpu->cpum.GstCtx.cs.Sel), \
2636 ("CS:RIP=%04x:%08RX64 + %x -> %04x:%08RX64, expected %04x:%08RX64\n", uCsBefore, uRipBefore, cbInstr, \
2637 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uCsBefore, uRipBefore + cbInstr)); \
2638 if ((a_fFlags) & IEM_CIMPL_F_RFLAGS) \
2639 { /* No need to check fEflBefore */ Assert(!((a_fFlags) & IEM_CIMPL_F_STATUS_FLAGS)); } \
2640 else if ((a_fFlags) & IEM_CIMPL_F_STATUS_FLAGS) \
2641 AssertMsg( (pVCpu->cpum.GstCtx.eflags.u & ~(X86_EFL_STATUS_BITS | X86_EFL_RF)) \
2642 == (fEflBefore & ~(X86_EFL_STATUS_BITS | X86_EFL_RF)), \
2643 ("EFL=%#RX32 -> %#RX32\n", fEflBefore, pVCpu->cpum.GstCtx.eflags.u)); \
2644 else \
2645 AssertMsg( (pVCpu->cpum.GstCtx.eflags.u & ~(X86_EFL_RF)) \
2646 == (fEflBefore & ~(X86_EFL_RF)), \
2647 ("EFL=%#RX32 -> %#RX32\n", fEflBefore, pVCpu->cpum.GstCtx.eflags.u)); \
2648 if (!((a_fFlags) & IEM_CIMPL_F_MODE)) \
2649 { \
2650 uint32_t fExecRecalc = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS); \
2651 AssertMsg( fExecBefore == fExecRecalc \
2652 /* in case ES, DS or SS was external initially (happens a lot with HM): */ \
2653 || ( fExecBefore == (fExecRecalc & ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK) \
2654 && (fExecRecalc & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT), \
2655 ("fExec=%#x -> %#x (diff %#x)\n", fExecBefore, fExecRecalc, fExecBefore ^ fExecRecalc)); \
2656 } \
2657 } \
2658 return rcStrictHlp; \
2659 } while (0)
2660#else
2661# define IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, a_CallExpr) return a_CallExpr
2662#endif
2663
2664/**
2665 * Defers the rest of the instruction emulation to a C implementation routine
2666 * and returns, only taking the standard parameters.
2667 *
2668 * @param a_fFlags IEM_CIMPL_F_XXX.
2669 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2670 * in the native recompiler.
2671 * @param a_pfnCImpl The pointer to the C routine.
2672 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
2673 */
2674#define IEM_MC_CALL_CIMPL_0(a_fFlags, a_fGstShwFlush, a_pfnCImpl) \
2675 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu)))
2676
2677/**
2678 * Defers the rest of instruction emulation to a C implementation routine and
2679 * returns, taking one argument in addition to the standard ones.
2680 *
2681 * @param a_fFlags IEM_CIMPL_F_XXX.
2682 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2683 * in the native recompiler.
2684 * @param a_pfnCImpl The pointer to the C routine.
2685 * @param a0 The argument.
2686 */
2687#define IEM_MC_CALL_CIMPL_1(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
2688 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0))
2689
2690/**
2691 * Defers the rest of the instruction emulation to a C implementation routine
2692 * and returns, taking two arguments in addition to the standard ones.
2693 *
2694 * @param a_fFlags IEM_CIMPL_F_XXX.
2695 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2696 * in the native recompiler.
2697 * @param a_pfnCImpl The pointer to the C routine.
2698 * @param a0 The first extra argument.
2699 * @param a1 The second extra argument.
2700 */
2701#define IEM_MC_CALL_CIMPL_2(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
2702 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1))
2703
2704/**
2705 * Defers the rest of the instruction emulation to a C implementation routine
2706 * and returns, taking three arguments in addition to the standard ones.
2707 *
2708 * @param a_fFlags IEM_CIMPL_F_XXX.
2709 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2710 * in the native recompiler.
2711 * @param a_pfnCImpl The pointer to the C routine.
2712 * @param a0 The first extra argument.
2713 * @param a1 The second extra argument.
2714 * @param a2 The third extra argument.
2715 */
2716#define IEM_MC_CALL_CIMPL_3(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
2717 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2))
2718
2719/**
2720 * Defers the rest of the instruction emulation to a C implementation routine
2721 * and returns, taking four arguments in addition to the standard ones.
2722 *
2723 * @param a_fFlags IEM_CIMPL_F_XXX.
2724 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2725 * in the native recompiler.
2726 * @param a_pfnCImpl The pointer to the C routine.
2727 * @param a0 The first extra argument.
2728 * @param a1 The second extra argument.
2729 * @param a2 The third extra argument.
2730 * @param a3 The fourth extra argument.
2731 */
2732#define IEM_MC_CALL_CIMPL_4(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3) \
2733 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3))
2734
2735/**
2736 * Defers the rest of the instruction emulation to a C implementation routine
2737 * and returns, taking five arguments in addition to the standard ones.
2738 *
2739 * @param a_fFlags IEM_CIMPL_F_XXX.
2740 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2741 * in the native recompiler.
2742 * @param a_pfnCImpl The pointer to the C routine.
2743 * @param a0 The first extra argument.
2744 * @param a1 The second extra argument.
2745 * @param a2 The third extra argument.
2746 * @param a3 The fourth extra argument.
2747 * @param a4 The fifth extra argument.
2748 */
2749#define IEM_MC_CALL_CIMPL_5(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3, a4) \
2750 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4))
2751
2752/**
2753 * Defers the entire instruction emulation to a C implementation routine and
2754 * returns, only taking the standard parameters.
2755 *
2756 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
2757 *
2758 * @param a_fFlags IEM_CIMPL_F_XXX.
2759 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2760 * in the native recompiler.
2761 * @param a_pfnCImpl The pointer to the C routine.
2762 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
2763 */
2764#define IEM_MC_DEFER_TO_CIMPL_0_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl) \
2765 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu)))
2766
2767/**
2768 * Defers the entire instruction emulation to a C implementation routine and
2769 * returns, taking one argument in addition to the standard ones.
2770 *
2771 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
2772 *
2773 * @param a_fFlags IEM_CIMPL_F_XXX.
2774 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2775 * in the native recompiler.
2776 * @param a_pfnCImpl The pointer to the C routine.
2777 * @param a0 The argument.
2778 */
2779#define IEM_MC_DEFER_TO_CIMPL_1_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
2780 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0))
2781
2782/**
2783 * Defers the entire instruction emulation to a C implementation routine and
2784 * returns, taking two arguments in addition to the standard ones.
2785 *
2786 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
2787 *
2788 * @param a_fFlags IEM_CIMPL_F_XXX.
2789 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2790 * in the native recompiler.
2791 * @param a_pfnCImpl The pointer to the C routine.
2792 * @param a0 The first extra argument.
2793 * @param a1 The second extra argument.
2794 */
2795#define IEM_MC_DEFER_TO_CIMPL_2_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
2796 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1))
2797
2798/**
2799 * Defers the entire instruction emulation to a C implementation routine and
2800 * returns, taking three arguments in addition to the standard ones.
2801 *
2802 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
2803 *
2804 * @param a_fFlags IEM_CIMPL_F_XXX.
2805 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2806 * in the native recompiler.
2807 * @param a_pfnCImpl The pointer to the C routine.
2808 * @param a0 The first extra argument.
2809 * @param a1 The second extra argument.
2810 * @param a2 The third extra argument.
2811 */
2812#define IEM_MC_DEFER_TO_CIMPL_3_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
2813 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2))
2814
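/*
 * Illustrative sketch: deferring a whole instruction to a C implementation
 * routine.  The flag combination and routine shown follow the usual pattern
 * for HLT but are an assumption here, not lifted from a decoder file.
 */
#if 0
IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_END_TB | IEM_CIMPL_F_VMEXIT, 0, iemCImpl_hlt);
#endif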
2815
2816/**
2817 * Calls a FPU assembly implementation taking one visible argument.
2818 *
2819 * @param a_pfnAImpl Pointer to the assembly FPU routine.
2820 * @param a0 The first extra argument.
2821 */
2822#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
2823 do { \
2824 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0)); \
2825 } while (0)
2826
2827/**
2828 * Calls a FPU assembly implementation taking two visible arguments.
2829 *
2830 * @param a_pfnAImpl Pointer to the assembly FPU routine.
2831 * @param a0 The first extra argument.
2832 * @param a1 The second extra argument.
2833 */
2834#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
2835 do { \
2836 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
2837 } while (0)
2838
2839/**
2840 * Calls a FPU assembly implementation taking three visible arguments.
2841 *
2842 * @param a_pfnAImpl Pointer to the assembly FPU routine.
2843 * @param a0 The first extra argument.
2844 * @param a1 The second extra argument.
2845 * @param a2 The third extra argument.
2846 */
2847#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
2848 do { \
2849 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
2850 } while (0)
2851
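/*
 * Illustrative sketch: a typical FPU worker call for an ST0,STi arithmetic
 * form, combining the stack guards and the result-store macros defined
 * elsewhere in this group.  Declarations are elided; names are hypothetical.
 */
#if 0
IEM_MC_PREPARE_FPU_USAGE();
IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
    IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
    IEM_MC_STORE_FPU_RESULT(FpuRes, 0, uFpuOpcode);
IEM_MC_ELSE()
    IEM_MC_FPU_STACK_UNDERFLOW(0, uFpuOpcode);
IEM_MC_ENDIF();
#endif
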
2852#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
2853 do { \
2854 (a_FpuData).FSW = (a_FSW); \
2855 (a_FpuData).r80Result = *(a_pr80Value); \
2856 } while (0)
2857
2858/** Pushes FPU result onto the stack. */
2859#define IEM_MC_PUSH_FPU_RESULT(a_FpuData, a_uFpuOpcode) \
2860 iemFpuPushResult(pVCpu, &a_FpuData, a_uFpuOpcode)
2861/** Pushes FPU result onto the stack and sets the FPUDP. */
2862#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2863 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2864
2865/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
2866#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo, a_uFpuOpcode) \
2867 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo, a_uFpuOpcode)
2868
2869/** Stores FPU result in a stack register. */
2870#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg, a_uFpuOpcode) \
2871 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg, a_uFpuOpcode)
2872/** Stores FPU result in a stack register and pops the stack. */
2873#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg, a_uFpuOpcode) \
2874 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg, a_uFpuOpcode)
2875/** Stores FPU result in a stack register and sets the FPUDP. */
2876#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2877 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2878/** Stores FPU result in a stack register, sets the FPUDP, and pops the
2879 * stack. */
2880#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2881 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2882
2883/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
2884#define IEM_MC_UPDATE_FPU_OPCODE_IP(a_uFpuOpcode) \
2885 iemFpuUpdateOpcodeAndIp(pVCpu, a_uFpuOpcode)
2886/** Free a stack register (for FFREE and FFREEP). */
2887#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
2888 iemFpuStackFree(pVCpu, a_iStReg)
2889/** Increment the FPU stack pointer. */
2890#define IEM_MC_FPU_STACK_INC_TOP() \
2891 iemFpuStackIncTop(pVCpu)
2892/** Decrement the FPU stack pointer. */
2893#define IEM_MC_FPU_STACK_DEC_TOP() \
2894 iemFpuStackDecTop(pVCpu)
2895
2896/** Updates the FSW, FOP, FPUIP, and FPUCS. */
2897#define IEM_MC_UPDATE_FSW(a_u16FSW, a_uFpuOpcode) \
2898 iemFpuUpdateFSW(pVCpu, a_u16FSW, a_uFpuOpcode)
2899/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
2900#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW, a_uFpuOpcode) \
2901 iemFpuUpdateFSW(pVCpu, a_u16FSW, a_uFpuOpcode)
2902/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
2903#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2904 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2905/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
2906#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW, a_uFpuOpcode) \
2907 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW, a_uFpuOpcode)
2908/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
2909 * stack. */
2910#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2911 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2912/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
2913#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW, a_uFpuOpcode) \
2914 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW, a_uFpuOpcode)
2915
2916/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
2917#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst, a_uFpuOpcode) \
2918 iemFpuStackUnderflow(pVCpu, a_iStDst, a_uFpuOpcode)
2919/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
2920 * stack. */
2921#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst, a_uFpuOpcode) \
2922 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst, a_uFpuOpcode)
2923/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
2924 * FPUDS. */
2925#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2926 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2927/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
2928 * FPUDS. Pops stack. */
2929#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2930 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2931/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
2932 * stack twice. */
2933#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP(a_uFpuOpcode) \
2934 iemFpuStackUnderflowThenPopPop(pVCpu, a_uFpuOpcode)
2935/** Raises a FPU stack underflow exception for an instruction pushing a result
2936 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
2937#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW(a_uFpuOpcode) \
2938 iemFpuStackPushUnderflow(pVCpu, a_uFpuOpcode)
2939/** Raises a FPU stack underflow exception for an instruction pushing a result
2940 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
2941#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO(a_uFpuOpcode) \
2942 iemFpuStackPushUnderflowTwo(pVCpu, a_uFpuOpcode)
2943
2944/** Raises a FPU stack overflow exception as part of a push attempt. Sets
2945 * FPUIP, FPUCS and FOP. */
2946#define IEM_MC_FPU_STACK_PUSH_OVERFLOW(a_uFpuOpcode) \
2947 iemFpuStackPushOverflow(pVCpu, a_uFpuOpcode)
2948/** Raises a FPU stack overflow exception as part of a push attempt. Sets
2949 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
2950#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2951 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2952/** Prepares for using the FPU state.
2953 * Ensures that we can use the host FPU in the current context (RC+R0).
2954 * Ensures the guest FPU state in the CPUMCTX is up to date. */
2955#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
2956/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
2957#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
2958/** Actualizes the guest FPU state so it can be accessed and modified. */
2959#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
2960
2961/** Stores SSE SIMD result updating MXCSR. */
2962#define IEM_MC_STORE_SSE_RESULT(a_SseData, a_iXmmReg) \
2963 iemSseStoreResult(pVCpu, &a_SseData, a_iXmmReg)
2964/** Updates MXCSR. */
2965#define IEM_MC_SSE_UPDATE_MXCSR(a_fMxcsr) \
2966 iemSseUpdateMxcsr(pVCpu, a_fMxcsr)
2967
2968/** Prepares for using the SSE state.
2969 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
2970 * Ensures the guest SSE state in the CPUMCTX is up to date. */
2971#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
2972/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
2973#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
2974/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
2975#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
2976
2977/** Prepares for using the AVX state.
2978 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
2979 * Ensures the guest AVX state in the CPUMCTX is up to date.
2980 * @note This will include the AVX512 state too when support for it is added
2981 * due to the zero-extending feature of VEX instructions. */
2982#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
2983/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
2984#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
2985/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
2986#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
2987
2988/**
2989 * Calls an MMX assembly implementation taking two visible arguments.
2990 *
2991 * @param a_pfnAImpl Pointer to the assembly MMX routine.
2992 * @param a0 The first extra argument.
2993 * @param a1 The second extra argument.
2994 */
2995#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
2996 do { \
2997 IEM_MC_PREPARE_FPU_USAGE(); \
2998 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
2999 } while (0)
3000
3001/**
3002 * Calls an MMX assembly implementation taking three visible arguments.
3003 *
3004 * @param a_pfnAImpl Pointer to the assembly MMX routine.
3005 * @param a0 The first extra argument.
3006 * @param a1 The second extra argument.
3007 * @param a2 The third extra argument.
3008 */
3009#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
3010 do { \
3011 IEM_MC_PREPARE_FPU_USAGE(); \
3012 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
3013 } while (0)
3014
3015
3016/**
3017 * Calls an SSE assembly implementation taking two visible arguments.
3018 *
3019 * @param a_pfnAImpl Pointer to the assembly SSE routine.
3020 * @param a0 The first extra argument.
3021 * @param a1 The second extra argument.
3022 */
3023#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
3024 do { \
3025 IEM_MC_PREPARE_SSE_USAGE(); \
3026 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
3027 } while (0)
3028
3029/**
3030 * Calls an SSE assembly implementation taking three visible arguments.
3031 *
3032 * @param a_pfnAImpl Pointer to the assembly SSE routine.
3033 * @param a0 The first extra argument.
3034 * @param a1 The second extra argument.
3035 * @param a2 The third extra argument.
3036 */
3037#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
3038 do { \
3039 IEM_MC_PREPARE_SSE_USAGE(); \
3040 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
3041 } while (0)
3042
3043
3044/**
3045 * Calls an AVX assembly implementation taking two visible arguments.
3046 *
3047 * There is one implicit zero'th argument, a pointer to the extended state.
3048 *
3049 * @param a_pfnAImpl Pointer to the assembly AVX routine.
3050 * @param a1 The first extra argument.
3051 * @param a2 The second extra argument.
3052 */
3053#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
3054 do { \
3055 IEM_MC_PREPARE_AVX_USAGE(); \
3056 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState, (a1), (a2)); \
3057 } while (0)
3058
3059/**
3060 * Calls an AVX assembly implementation taking three visible arguments.
3061 *
3062 * There is one implicit zero'th argument, a pointer to the extended state.
3063 *
3064 * @param a_pfnAImpl Pointer to the assembly AVX routine.
3065 * @param a1 The first extra argument.
3066 * @param a2 The second extra argument.
3067 * @param a3 The third extra argument.
3068 */
3069#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
3070 do { \
3071 IEM_MC_PREPARE_AVX_USAGE(); \
3072 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState, (a1), (a2), (a3)); \
3073 } while (0)
3074
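/*
 * Illustrative sketch: a three-operand AVX worker call; the pointer to the
 * extended state is passed implicitly as the zeroth argument (operand names
 * hypothetical).
 */
#if 0
IEM_MC_CALL_AVX_AIMPL_3(iemAImpl_vpand_u256, puDst, puSrc1, puSrc2);
#endif
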
3075/** @note Not for IOPL or IF testing. */
3076#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
3077/** @note Not for IOPL or IF testing. */
3078#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
3079/** @note Not for IOPL or IF testing. */
3080#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
3081/** @note Not for IOPL or IF testing. */
3082#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
3083/** @note Not for IOPL or IF testing. */
3084#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
3085 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
3086 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
3087/** @note Not for IOPL or IF testing. */
3088#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
3089 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
3090 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
3091/** @note Not for IOPL or IF testing. */
3092#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
3093 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
3094 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
3095 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
3096/** @note Not for IOPL or IF testing. */
3097#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
3098 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
3099 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
3100 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
3101#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
3102#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
3103#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
3104#define IEM_MC_IF_CX_IS_NOT_ONE() if (pVCpu->cpum.GstCtx.cx != 1) {
3105#define IEM_MC_IF_ECX_IS_NOT_ONE() if (pVCpu->cpum.GstCtx.ecx != 1) {
3106#define IEM_MC_IF_RCX_IS_NOT_ONE() if (pVCpu->cpum.GstCtx.rcx != 1) {
3107/** @note Not for IOPL or IF testing. */
3108#define IEM_MC_IF_CX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
3109 if ( pVCpu->cpum.GstCtx.cx != 1 \
3110 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
3111/** @note Not for IOPL or IF testing. */
3112#define IEM_MC_IF_ECX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
3113 if ( pVCpu->cpum.GstCtx.ecx != 1 \
3114 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
3115/** @note Not for IOPL or IF testing. */
3116#define IEM_MC_IF_RCX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
3117 if ( pVCpu->cpum.GstCtx.rcx != 1 \
3118 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
3119/** @note Not for IOPL or IF testing. */
3120#define IEM_MC_IF_CX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
3121 if ( pVCpu->cpum.GstCtx.cx != 1 \
3122 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
3123/** @note Not for IOPL or IF testing. */
3124#define IEM_MC_IF_ECX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
3125 if ( pVCpu->cpum.GstCtx.ecx != 1 \
3126 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
3127/** @note Not for IOPL or IF testing. */
3128#define IEM_MC_IF_RCX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
3129 if ( pVCpu->cpum.GstCtx.rcx != 1 \
3130 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
3131#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
3132#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
3133
3134#define IEM_MC_REF_FPUREG(a_pr80Dst, a_iSt) \
3135 do { (a_pr80Dst) = &pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80; } while (0)
3136#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
3137 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
3138#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
3139 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
3140#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
3141 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
3142#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
3143 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
3144#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
3145 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
3146#define IEM_MC_IF_FCW_IM() \
3147 if (pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_IM) {
3148#define IEM_MC_IF_MXCSR_XCPT_PENDING() \
3149 if (( ~((pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
3150 & (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_FLAGS)) != 0) {
3151
3152#define IEM_MC_ELSE() } else {
3153#define IEM_MC_ENDIF() } do {} while (0)
3154
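/*
 * Illustrative sketch: the condition macros open a brace that IEM_MC_ELSE /
 * IEM_MC_ENDIF continue and close, so no explicit braces are written, e.g.
 * the core of a JZ rel8 implementation (i8Imm is hypothetical here):
 */
#if 0
IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
    IEM_MC_REL_JMP_S8_AND_FINISH(i8Imm);
IEM_MC_ELSE()
    IEM_MC_ADVANCE_RIP_AND_FINISH();
IEM_MC_ENDIF();
#endif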
3155
3156/** Recompiler debugging: Flush guest register shadow copies. */
3157#define IEM_MC_HINT_FLUSH_GUEST_SHADOW(g_fGstShwFlush) ((void)0)
3158
3159/** @} */
3160
3161#endif /* !VMM_INCLUDED_SRC_include_IEMMc_h */
3162