VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMMc.h @ 103844

Last change on this file since 103844 was 103828, checked in by vboxsync, 13 months ago

VMM/IEM: Implemented simple (wholesale) status flag update skipping for arithmetic operations with the native emitter. bugref:10375

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 169.4 KB
Line 
1/* $Id: IEMMc.h 103828 2024-03-13 14:01:20Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - IEM_MC_XXX.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMMc_h
29#define VMM_INCLUDED_SRC_include_IEMMc_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34
35/** @name "Microcode" macros.
36 *
37 * The idea is that we should be able to use the same code to interpret
38 * instructions as well as to recompile them. Thus this obfuscation.
39 *
40 * @{
41 */
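/*
 * Illustrative sketch (not a definition, only an example of intended use): a
 * typical register-to-register instruction body as the decoders compose it
 * from the IEM_MC_XXX macros below.  The IEM_MC_F_MIN_386 flag and the
 * IEM_GET_MODRM_REG/IEM_GET_MODRM_RM helpers are assumed from the decoder
 * side and appear here purely for context.
 *
 *      IEM_MC_BEGIN(0, 1, IEM_MC_F_MIN_386, 0);
 *      IEM_MC_LOCAL(uint32_t, u32Value);
 *      IEM_MC_FETCH_GREG_U32(u32Value, IEM_GET_MODRM_RM(pVCpu, bRm));
 *      IEM_MC_STORE_GREG_U32(IEM_GET_MODRM_REG(pVCpu, bRm), u32Value);
 *      IEM_MC_ADVANCE_RIP_AND_FINISH();
 *      IEM_MC_END();
 */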
42
43#define IEM_MC_BEGIN(a_cArgs, a_cLocals, a_fMcFlags, a_fCImplFlags) {
44#define IEM_MC_END() }
45
46/** Internal macro. */
47#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
48 do \
49 { \
50 VBOXSTRICTRC rcStrict2 = a_Expr; \
51 if (rcStrict2 == VINF_SUCCESS) \
52 { /* likely */ } \
53 else \
54 return rcStrict2; \
55 } while (0)
56
57
58/** Dummy MC that prevents native recompilation. */
59#define IEM_MC_NO_NATIVE_RECOMPILE() ((void)0)
60
61/** Advances RIP, finishes the instruction and returns.
62 * This may include raising debug exceptions and such. */
63#define IEM_MC_ADVANCE_RIP_AND_FINISH() return iemRegAddToRipAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
64/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
65#define IEM_MC_REL_JMP_S8_AND_FINISH(a_i8) \
66 return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i8), pVCpu->iem.s.enmEffOpSize)
67/** Sets RIP (may trigger \#GP), finishes the instruction and returns.
68 * @note only usable in 16-bit op size mode. */
69#define IEM_MC_REL_JMP_S16_AND_FINISH(a_i16) \
70 return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i16))
71/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
72#define IEM_MC_REL_JMP_S32_AND_FINISH(a_i32) \
73 return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i32), pVCpu->iem.s.enmEffOpSize)
74/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
75#define IEM_MC_SET_RIP_U16_AND_FINISH(a_u16NewIP) \
76 return iemRegRipJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP), IEM_GET_INSTR_LEN(pVCpu))
77/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
78#define IEM_MC_SET_RIP_U32_AND_FINISH(a_u32NewIP) \
79 return iemRegRipJumpU32AndFinishClearingRF((pVCpu), (a_u32NewIP), IEM_GET_INSTR_LEN(pVCpu))
80/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
81#define IEM_MC_SET_RIP_U64_AND_FINISH(a_u64NewIP) \
82 return iemRegRipJumpU64AndFinishClearingRF((pVCpu), (a_u64NewIP), IEM_GET_INSTR_LEN(pVCpu))
83
84#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
85#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
86 do { \
87 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)))) \
88 { /* probable */ } \
89 else return iemRaiseDeviceNotAvailable(pVCpu); \
90 } while (0)
91#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
92 do { \
93 if (RT_LIKELY(!((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)))) \
94 { /* probable */ } \
95 else return iemRaiseDeviceNotAvailable(pVCpu); \
96 } while (0)
97#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
98 do { \
99 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES))) \
100 { /* probable */ } \
101 else return iemRaiseMathFault(pVCpu); \
102 } while (0)
103#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
104 do { \
105 /* Since none of the bits we compare from XCR0, CR4 and CR0 overlap, it can \
106 be reduced to a single compare branch in the more probable code path. */ \
107 if (RT_LIKELY( ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) \
108 | (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
109 | (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)) \
110 == (XSAVE_C_YMM | XSAVE_C_SSE | X86_CR4_OSXSAVE))) \
111 { /* probable */ } \
112 else if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
113 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)) \
114 return iemRaiseUndefinedOpcode(pVCpu); \
115 else \
116 return iemRaiseDeviceNotAvailable(pVCpu); \
117 } while (0)
118AssertCompile(!((XSAVE_C_YMM | XSAVE_C_SSE) & X86_CR4_OSXSAVE));
119AssertCompile(!((XSAVE_C_YMM | XSAVE_C_SSE) & X86_CR0_TS));
120AssertCompile(!(X86_CR4_OSXSAVE & X86_CR0_TS));
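/* Worked illustration of the fold used above (fXcr0, fCr4 and fCr0 standing in
 * for the guest XCR0, CR4 and CR0 values): the YMM/SSE XSAVE bits and OSXSAVE
 * must be set while CR0.TS must be clear, and because the three masks share no
 * bits (asserted above), OR-ing the masked values can only equal the expected
 * constant when every required bit is 1 and TS is 0:
 *
 *      uint64_t const fFolded = (fXcr0 & (XSAVE_C_YMM | XSAVE_C_SSE))
 *                             | (fCr4  & X86_CR4_OSXSAVE)
 *                             | (fCr0  & X86_CR0_TS);
 *      bool     const fAvxOk  = fFolded == (XSAVE_C_YMM | XSAVE_C_SSE | X86_CR4_OSXSAVE);
 */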
121#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
122 do { \
123 /* Since the CR4 and CR0 bits don't overlap, it can be reduced to a
124 single compare branch in the more probable code path. */ \
125 if (RT_LIKELY( ( (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
126 | (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR)) \
127 == X86_CR4_OSFXSR)) \
128 { /* likely */ } \
129 else if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
130 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR)) \
131 return iemRaiseUndefinedOpcode(pVCpu); \
132 else \
133 return iemRaiseDeviceNotAvailable(pVCpu); \
134 } while (0)
135AssertCompile(!((X86_CR0_EM | X86_CR0_TS) & X86_CR4_OSFXSR));
136#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
137 do { \
138 /* Since the two CR0 bits don't overlap with FSW.ES, this can be reduced to a
139 single compare branch in the more probable code path. */ \
140 if (RT_LIKELY(!( (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
141 | (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES)))) \
142 { /* probable */ } \
143 else if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
144 return iemRaiseUndefinedOpcode(pVCpu); \
145 else if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
146 return iemRaiseDeviceNotAvailable(pVCpu); \
147 else \
148 return iemRaiseMathFault(pVCpu); \
149 } while (0)
150AssertCompile(!((X86_CR0_EM | X86_CR0_TS) & X86_FSW_ES));
151/** @todo recomp: this one is slightly problematic as the recompiler doesn't
152 * count the CPL into the TB key. However it is safe enough for now, as
153 * it calls iemRaiseGeneralProtectionFault0 directly so no calls will be
154 * emitted for it. */
155#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
156 do { \
157 if (RT_LIKELY(IEM_GET_CPL(pVCpu) == 0)) { /* probable */ } \
158 else return iemRaiseGeneralProtectionFault0(pVCpu); \
159 } while (0)
160#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
161 do { \
162 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
163 else return iemRaiseGeneralProtectionFault0(pVCpu); \
164 } while (0)
165#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
166 do { \
167 if (RT_LIKELY( ((pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE) | IEM_GET_CPU_MODE(pVCpu)) \
168 == (X86_CR4_FSGSBASE | IEMMODE_64BIT))) \
169 { /* probable */ } \
170 else return iemRaiseUndefinedOpcode(pVCpu); \
171 } while (0)
172AssertCompile(X86_CR4_FSGSBASE > UINT8_MAX);
173#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
174 do { \
175 if (RT_LIKELY(IEM_IS_CANONICAL(a_u64Addr))) { /* likely */ } \
176 else return iemRaiseGeneralProtectionFault0(pVCpu); \
177 } while (0)
178#define IEM_MC_MAYBE_RAISE_SSE_AVX_SIMD_FP_OR_UD_XCPT() \
179 do { \
180 if (RT_LIKELY(( ~((pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
181 & (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)) \
182 { /* probable */ } \
183 else \
184 { \
185 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT) \
186 return iemRaiseSimdFpException(pVCpu); \
187 return iemRaiseUndefinedOpcode(pVCpu); \
188 } \
189 } while (0)
190#define IEM_MC_RAISE_SSE_AVX_SIMD_FP_OR_UD_XCPT() \
191 do { \
192 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT)\
193 return iemRaiseSimdFpException(pVCpu); \
194 return iemRaiseUndefinedOpcode(pVCpu); \
195 } while (0)
196
197
198#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
199#define IEM_MC_LOCAL_ASSIGN(a_Type, a_Name, a_Value) a_Type a_Name = (a_Value)
200#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
201#define IEM_MC_NOREF(a_Name) RT_NOREF_PV(a_Name) /* NOP/liveness hack */
202#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
203#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
204#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
205/** @note IEMAllInstPython.py duplicates the expansion. */
206#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
207 uint32_t a_Name; \
208 uint32_t *a_pName = &a_Name
209#define IEM_MC_LOCAL_EFLAGS(a_Name) uint32_t a_Name = pVCpu->cpum.GstCtx.eflags.u
210#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
211 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
212#define IEM_MC_COMMIT_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput) do { \
213 AssertMsg((pVCpu->cpum.GstCtx.eflags.u & ~(a_fEflOutput)) == ((a_EFlags) & ~(a_fEflOutput)), \
214 ("eflags.u=%#x (%#x) vs %s=%#x (%#x) - diff %#x (a_fEflOutput=%#x)\n", \
215 pVCpu->cpum.GstCtx.eflags.u & ~(a_fEflOutput), pVCpu->cpum.GstCtx.eflags.u, #a_EFlags, \
216 (a_EFlags) & ~(a_fEflOutput), (a_EFlags), \
217 (pVCpu->cpum.GstCtx.eflags.u & ~(a_fEflOutput)) ^ ((a_EFlags) & ~(a_fEflOutput)), a_fEflOutput)); \
218 pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); \
219 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); \
220 } while (0)
221#define IEM_MC_COMMIT_EFLAGS_OPT(a_EFlags) IEM_MC_COMMIT_EFLAGS(a_EFlags)
222#define IEM_MC_COMMIT_EFLAGS_OPT_EX(a_EFlags, a_fEflInput, a_fEflOutput) IEM_MC_COMMIT_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput)
223
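/* Illustrative sketch of how the EFLAGS argument/local pair is typically used
 * around an assembly worker.  IEM_MC_CALL_VOID_AIMPL_3 and iemAImpl_add_u32
 * are assumed from elsewhere in IEM, and the explicit IEM_MC_FETCH_EFLAGS is
 * shown here only to make the data flow obvious:
 *
 *      IEM_MC_ARG(uint32_t *,          pu32Dst,            0);
 *      IEM_MC_ARG(uint32_t,            u32Src,             1);
 *      IEM_MC_ARG_LOCAL_EFLAGS(        pEFlags, EFlags,    2);
 *      IEM_MC_FETCH_GREG_U32(u32Src, IEM_GET_MODRM_REG(pVCpu, bRm));
 *      IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_RM(pVCpu, bRm));
 *      IEM_MC_FETCH_EFLAGS(EFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *      IEM_MC_COMMIT_EFLAGS(EFlags);
 */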
224/** ASSUMES the source variable is not used after this statement. */
225#define IEM_MC_ASSIGN_TO_SMALLER(a_VarDst, a_VarSrcEol) (a_VarDst) = (a_VarSrcEol)
226
227#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
228#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
229#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
230#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
231#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
232#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
233#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
234#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
235#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
236#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
237#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
238#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
239#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
240#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
241#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
242#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
243#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
244#define IEM_MC_FETCH_GREG_PAIR_U32(a_u64Dst, a_iGRegLo, a_iGRegHi) do { \
245 (a_u64Dst).s.Lo = iemGRegFetchU32(pVCpu, (a_iGRegLo)); \
246 (a_u64Dst).s.Hi = iemGRegFetchU32(pVCpu, (a_iGRegHi)); \
247 } while(0)
248#define IEM_MC_FETCH_GREG_PAIR_U64(a_u128Dst, a_iGRegLo, a_iGRegHi) do { \
249 (a_u128Dst).s.Lo = iemGRegFetchU64(pVCpu, (a_iGRegLo)); \
250 (a_u128Dst).s.Hi = iemGRegFetchU64(pVCpu, (a_iGRegHi)); \
251 } while(0)
252#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
253 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
254 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
255 } while (0)
256#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
257 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
258 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
259 } while (0)
260#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
261 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
262 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
263 } while (0)
264/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
265#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
266 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
267 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
268 } while (0)
269#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
270 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
271 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
272 } while (0)
273/** @note Not for IOPL or IF testing or modification. */
274#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
275#define IEM_MC_FETCH_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput) IEM_MC_FETCH_EFLAGS(a_EFlags)
276#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u /* (only LAHF) */
277#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.XState.x87.FSW
278#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.XState.x87.FCW
279
280#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
281#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
282#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
283#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
284#define IEM_MC_STORE_GREG_I64(a_iGReg, a_i64Value) *iemGRegRefI64(pVCpu, (a_iGReg)) = (a_i64Value)
285#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
286#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
287#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
288#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
289#define IEM_MC_STORE_GREG_PAIR_U32(a_iGRegLo, a_iGRegHi, a_u64Value) do { \
290 *iemGRegRefU64(pVCpu, (a_iGRegLo)) = (uint32_t)(a_u64Value).s.Lo; \
291 *iemGRegRefU64(pVCpu, (a_iGRegHi)) = (uint32_t)(a_u64Value).s.Hi; \
292 } while(0)
293#define IEM_MC_STORE_GREG_PAIR_U64(a_iGRegLo, a_iGRegHi, a_u128Value) do { \
294 *iemGRegRefU64(pVCpu, (a_iGRegLo)) = (uint64_t)(a_u128Value).s.Lo; \
295 *iemGRegRefU64(pVCpu, (a_iGRegHi)) = (uint64_t)(a_u128Value).s.Hi; \
296 } while(0)
297#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
298
299/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
300#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
301 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
302 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
303 } while (0)
304#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
305 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
306 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
307 } while (0)
308#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
309 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
310
311
312#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
313#define IEM_MC_REF_GREG_U8_CONST(a_pu8Dst, a_iGReg) (a_pu8Dst) = (uint8_t const *)iemGRegRefU8( pVCpu, (a_iGReg))
314#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
315#define IEM_MC_REF_GREG_U16_CONST(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t const *)iemGRegRefU16(pVCpu, (a_iGReg))
316/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
317 * Use IEM_MC_CLEAR_HIGH_GREG_U64! */
318#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
319#define IEM_MC_REF_GREG_U32_CONST(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t const *)iemGRegRefU32(pVCpu, (a_iGReg))
320#define IEM_MC_REF_GREG_I32(a_pi32Dst, a_iGReg) (a_pi32Dst) = (int32_t *)iemGRegRefU32(pVCpu, (a_iGReg))
321#define IEM_MC_REF_GREG_I32_CONST(a_pi32Dst, a_iGReg) (a_pi32Dst) = (int32_t const *)iemGRegRefU32(pVCpu, (a_iGReg))
322#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
323#define IEM_MC_REF_GREG_U64_CONST(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t const *)iemGRegRefU64(pVCpu, (a_iGReg))
324#define IEM_MC_REF_GREG_I64(a_pi64Dst, a_iGReg) (a_pi64Dst) = (int64_t *)iemGRegRefU64(pVCpu, (a_iGReg))
325#define IEM_MC_REF_GREG_I64_CONST(a_pi64Dst, a_iGReg) (a_pi64Dst) = (int64_t const *)iemGRegRefU64(pVCpu, (a_iGReg))
326/** @note Not for IOPL or IF testing or modification.
327 * @note Must preserve any undefined bits, see CPUMX86EFLAGS! */
328#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.uBoth
329#define IEM_MC_REF_EFLAGS_EX(a_pEFlags, a_fEflInput, a_fEflOutput) IEM_MC_REF_EFLAGS(a_pEFlags)
330#define IEM_MC_REF_MXCSR(a_pfMxcsr) (a_pfMxcsr) = &pVCpu->cpum.GstCtx.XState.x87.MXCSR
331
332#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
333#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
334 do { \
335 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
336 *pu32Reg += (a_u32Value); \
337 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
338 } while (0)
339#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
340
341#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u8Const) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u8Const)
342#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u8Const) \
343 do { \
344 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
345 *pu32Reg -= (a_u8Const); \
346 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
347 } while (0)
348#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u8Const) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u8Const)
349#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
350
351#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
352#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
353#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
354#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
355#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
356#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
357#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
358
359#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
360#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
361#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
362#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
363
364#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
365#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
366#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
367
368#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
369#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
370#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
371
372#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
373#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
374#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
375
376#define IEM_MC_SHR_LOCAL_U8(a_u8Local, a_cShift) do { (a_u8Local) >>= (a_cShift); } while (0)
377
378#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
379#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
380#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
381
382#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
383
384#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
385
386#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
387#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
388#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
389 do { \
390 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
391 *pu32Reg &= (a_u32Value); \
392 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
393 } while (0)
394#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
395
396#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
397#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
398#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
399 do { \
400 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
401 *pu32Reg |= (a_u32Value); \
402 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
403 } while (0)
404#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
405
406#define IEM_MC_BSWAP_LOCAL_U16(a_u16Local) (a_u16Local) = RT_BSWAP_U16((a_u16Local));
407#define IEM_MC_BSWAP_LOCAL_U32(a_u32Local) (a_u32Local) = RT_BSWAP_U32((a_u32Local));
408#define IEM_MC_BSWAP_LOCAL_U64(a_u64Local) (a_u64Local) = RT_BSWAP_U64((a_u64Local));
409
410/** @note Not for IOPL or IF modification. */
411#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
412/** @note Not for IOPL or IF modification. */
413#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
414/** @note Not for IOPL or IF modification. */
415#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
416
417#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.XState.x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
418
419/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
420#define IEM_MC_FPU_TO_MMX_MODE() do { \
421 iemFpuRotateStackSetTop(&pVCpu->cpum.GstCtx.XState.x87, 0); \
422 pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
423 pVCpu->cpum.GstCtx.XState.x87.FTW = 0xff; \
424 } while (0)
425
426/** Switches the FPU state from MMX mode (FSW.TOS=0, FTW=0xffff). */
427#define IEM_MC_FPU_FROM_MMX_MODE() do { \
428 iemFpuRotateStackSetTop(&pVCpu->cpum.GstCtx.XState.x87, 0); \
429 pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
430 pVCpu->cpum.GstCtx.XState.x87.FTW = 0; \
431 } while (0)
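/* Note on the FTW values above: the x87.FTW field holds the abridged
 * (FXSAVE-style) tag word, one bit per register with 1 meaning occupied.
 * Entering MMX mode therefore marks all eight registers as valid (0xff),
 * while leaving it marks them all empty (0). */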
432
433#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
434 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx; } while (0)
435#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
436 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[0]; } while (0)
437#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
438 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
439 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
440 } while (0)
441#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
442 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
443 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
444 } while (0)
445#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
446 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
447#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
448 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
449#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
450 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
451#define IEM_MC_MODIFIED_MREG(a_iMReg) \
452 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; } while (0)
453#define IEM_MC_MODIFIED_MREG_BY_REF(a_pu64Dst) \
454 do { ((uint32_t *)(a_pu64Dst))[2] = 0xffff; } while (0)
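/* Note on the 0xffff stores above: writing an MMX register architecturally
 * sets the sign/exponent bits (79:64) of the aliased x87 register to all ones,
 * which is what assigning 0xffff to au32[2] accomplishes; the bytes beyond
 * bit 79 are padding in the register image. */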
455
456#define IEM_MC_CLEAR_XREG_U32_MASK(a_iXReg, a_bMask) \
457 do { if ((a_bMask) & (1 << 0)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0] = 0; \
458 if ((a_bMask) & (1 << 1)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[1] = 0; \
459 if ((a_bMask) & (1 << 2)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[2] = 0; \
460 if ((a_bMask) & (1 << 3)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[3] = 0; \
461 } while (0)
462#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
463 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
464 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
465 } while (0)
466#define IEM_MC_FETCH_XREG_XMM(a_XmmValue, a_iXReg) \
467 do { (a_XmmValue).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
468 (a_XmmValue).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
469 } while (0)
470#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg, a_iQWord) \
471 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQWord)]; } while (0)
472#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg, a_iDWord) \
473 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDWord)]; } while (0)
474#define IEM_MC_FETCH_XREG_U16(a_u16Value, a_iXReg, a_iWord) \
475 do { (a_u16Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au16[(a_iWord)]; } while (0)
476#define IEM_MC_FETCH_XREG_U8( a_u8Value, a_iXReg, a_iByte) \
477 do { (a_u8Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au8[(a_iByte)]; } while (0)
478#define IEM_MC_FETCH_XREG_PAIR_U128(a_Dst, a_iXReg1, a_iXReg2) \
479 do { (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
480 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
481 (a_Dst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[0]; \
482 (a_Dst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[1]; \
483 } while (0)
484#define IEM_MC_FETCH_XREG_PAIR_XMM(a_Dst, a_iXReg1, a_iXReg2) \
485 do { (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
486 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
487 (a_Dst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[0]; \
488 (a_Dst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[1]; \
489 } while (0)
490#define IEM_MC_FETCH_XREG_PAIR_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_iXReg2) \
491 do { (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
492 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
493 (a_Dst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[0]; \
494 (a_Dst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[1]; \
495 (a_Dst).u64Rax = pVCpu->cpum.GstCtx.rax; \
496 (a_Dst).u64Rdx = pVCpu->cpum.GstCtx.rdx; \
497 } while (0)
498#define IEM_MC_FETCH_XREG_PAIR_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_iXReg2) \
499 do { (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
500 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
501 (a_Dst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[0]; \
502 (a_Dst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[1]; \
503 (a_Dst).u64Rax = (int64_t)(int32_t)pVCpu->cpum.GstCtx.eax; \
504 (a_Dst).u64Rdx = (int64_t)(int32_t)pVCpu->cpum.GstCtx.edx; \
505 } while (0)
506#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
507 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
508 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
509 } while (0)
510#define IEM_MC_STORE_XREG_XMM(a_iXReg, a_XmmValue) \
511 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_XmmValue).au64[0]; \
512 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_XmmValue).au64[1]; \
513 } while (0)
514#define IEM_MC_STORE_XREG_XMM_U32(a_iXReg, a_iDword, a_XmmValue) \
515 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDword)] = (a_XmmValue).au32[(a_iDword)]; } while (0)
516#define IEM_MC_STORE_XREG_XMM_U64(a_iXReg, a_iQword, a_XmmValue) \
517 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQword)] = (a_XmmValue).au64[(a_iQword)]; } while (0)
518#define IEM_MC_STORE_XREG_U64(a_iXReg, a_iQword, a_u64Value) \
519 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQword)] = (a_u64Value); } while (0)
520#define IEM_MC_STORE_XREG_U32(a_iXReg, a_iDword, a_u32Value) \
521 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDword)] = (a_u32Value); } while (0)
522#define IEM_MC_STORE_XREG_U16(a_iXReg, a_iWord, a_u16Value) \
523 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au16[(a_iWord)] = (a_u16Value); } while (0)
524#define IEM_MC_STORE_XREG_U8(a_iXReg, a_iByte, a_u8Value) \
525 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au8[(a_iByte)] = (a_u8Value); } while (0)
526
527#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
528 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
529 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
530 } while (0)
531
532#define IEM_MC_STORE_XREG_U32_U128(a_iXReg, a_iDwDst, a_u128Value, a_iDwSrc) \
533 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDwDst)] = (a_u128Value).au32[(a_iDwSrc)]; } while (0)
534#define IEM_MC_STORE_XREG_R32(a_iXReg, a_r32Value) \
535 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[0] = (a_r32Value); } while (0)
536#define IEM_MC_STORE_XREG_R64(a_iXReg, a_r64Value) \
537 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[0] = (a_r64Value); } while (0)
538#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
539 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
540 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
541 } while (0)
542
543#define IEM_MC_BROADCAST_XREG_U8_ZX_VLMAX(a_iXRegDst, a_u8Src) \
544 do { uintptr_t const iXRegDstTmp = (a_iXRegDst); \
545 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[0] = (a_u8Src); \
546 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[1] = (a_u8Src); \
547 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[2] = (a_u8Src); \
548 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[3] = (a_u8Src); \
549 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[4] = (a_u8Src); \
550 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[5] = (a_u8Src); \
551 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[6] = (a_u8Src); \
552 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[7] = (a_u8Src); \
553 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[8] = (a_u8Src); \
554 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[9] = (a_u8Src); \
555 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[10] = (a_u8Src); \
556 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[11] = (a_u8Src); \
557 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[12] = (a_u8Src); \
558 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[13] = (a_u8Src); \
559 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[14] = (a_u8Src); \
560 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[15] = (a_u8Src); \
561 IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
562 } while (0)
563#define IEM_MC_BROADCAST_XREG_U16_ZX_VLMAX(a_iXRegDst, a_u16Src) \
564 do { uintptr_t const iXRegDstTmp = (a_iXRegDst); \
565 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[0] = (a_u16Src); \
566 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[1] = (a_u16Src); \
567 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[2] = (a_u16Src); \
568 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[3] = (a_u16Src); \
569 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[4] = (a_u16Src); \
570 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[5] = (a_u16Src); \
571 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[6] = (a_u16Src); \
572 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[7] = (a_u16Src); \
573 IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
574 } while (0)
575#define IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX(a_iXRegDst, a_u32Src) \
576 do { uintptr_t const iXRegDstTmp = (a_iXRegDst); \
577 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[0] = (a_u32Src); \
578 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[1] = (a_u32Src); \
579 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[2] = (a_u32Src); \
580 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[3] = (a_u32Src); \
581 IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
582 } while (0)
583#define IEM_MC_BROADCAST_XREG_U64_ZX_VLMAX(a_iXRegDst, a_u64Src) \
584 do { uintptr_t const iXRegDstTmp = (a_iXRegDst); \
585 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au64[0] = (a_u64Src); \
586 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au64[1] = (a_u64Src); \
587 IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
588 } while (0)
589
590#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
591 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
592#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
593 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
594#define IEM_MC_REF_XREG_XMM_CONST(a_pXmmDst, a_iXReg) \
595 (a_pXmmDst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)])
596#define IEM_MC_REF_XREG_U32_CONST(a_pu32Dst, a_iXReg) \
597 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0])
598#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
599 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0])
600#define IEM_MC_REF_XREG_R32_CONST(a_pr32Dst, a_iXReg) \
601 (a_pr32Dst) = ((RTFLOAT32U const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[0])
602#define IEM_MC_REF_XREG_R64_CONST(a_pr64Dst, a_iXReg) \
603 (a_pr64Dst) = ((RTFLOAT64U const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[0])
604#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
605 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[0] \
606 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[0]; \
607 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[1] \
608 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[1]; \
609 } while (0)
610
611#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
612 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
613 (a_u32Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au32[0]; \
614 } while (0)
615#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc, a_iQWord) \
616 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
617 (a_u64Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[(a_iQWord)]; \
618 } while (0)
619#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
620 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
621 (a_u128Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
622 (a_u128Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
623 } while (0)
624#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
625 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
626 (a_u256Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
627 (a_u256Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
628 (a_u256Dst).au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
629 (a_u256Dst).au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
630 } while (0)
631
632#define IEM_MC_STORE_YREG_U128(a_iYRegDst, a_iDQword, a_u128Value) \
633 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
634 if ((a_iDQword) == 0) \
635 { \
636 pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au64[0] = (a_u128Value).au64[0]; \
637 pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au64[1] = (a_u128Value).au64[1]; \
638 } \
639 else \
640 { \
641 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au64[0] = (a_u128Value).au64[0]; \
642 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au64[1] = (a_u128Value).au64[1]; \
643 } \
644 } while (0)
645
646#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
647#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
648 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
649 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
650 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = 0; \
651 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
652 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
653 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
654 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
655 } while (0)
656#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
657 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
658 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
659 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
660 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
661 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
662 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
663 } while (0)
664#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
665 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
666 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
667 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
668 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
669 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
670 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
671 } while (0)
672#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
673 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
674 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
675 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
676 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
677 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
678 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
679 } while (0)
680
681#define IEM_MC_BROADCAST_YREG_U8_ZX_VLMAX(a_iYRegDst, a_u8Src) \
682 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
683 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[0] = (a_u8Src); \
684 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[1] = (a_u8Src); \
685 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[2] = (a_u8Src); \
686 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[3] = (a_u8Src); \
687 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[4] = (a_u8Src); \
688 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[5] = (a_u8Src); \
689 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[6] = (a_u8Src); \
690 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[7] = (a_u8Src); \
691 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[8] = (a_u8Src); \
692 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[9] = (a_u8Src); \
693 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[10] = (a_u8Src); \
694 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[11] = (a_u8Src); \
695 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[12] = (a_u8Src); \
696 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[13] = (a_u8Src); \
697 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[14] = (a_u8Src); \
698 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[15] = (a_u8Src); \
699 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[0] = (a_u8Src); \
700 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[1] = (a_u8Src); \
701 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[2] = (a_u8Src); \
702 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[3] = (a_u8Src); \
703 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[4] = (a_u8Src); \
704 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[5] = (a_u8Src); \
705 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[6] = (a_u8Src); \
706 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[7] = (a_u8Src); \
707 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[8] = (a_u8Src); \
708 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[9] = (a_u8Src); \
709 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[10] = (a_u8Src); \
710 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[11] = (a_u8Src); \
711 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[12] = (a_u8Src); \
712 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[13] = (a_u8Src); \
713 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[14] = (a_u8Src); \
714 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[15] = (a_u8Src); \
715 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
716 } while (0)
717#define IEM_MC_BROADCAST_YREG_U16_ZX_VLMAX(a_iYRegDst, a_u16Src) \
718 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
719 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[0] = (a_u16Src); \
720 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[1] = (a_u16Src); \
721 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[2] = (a_u16Src); \
722 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[3] = (a_u16Src); \
723 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[4] = (a_u16Src); \
724 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[5] = (a_u16Src); \
725 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[6] = (a_u16Src); \
726 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[7] = (a_u16Src); \
727 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[0] = (a_u16Src); \
728 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[1] = (a_u16Src); \
729 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[2] = (a_u16Src); \
730 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[3] = (a_u16Src); \
731 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[4] = (a_u16Src); \
732 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[5] = (a_u16Src); \
733 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[6] = (a_u16Src); \
734 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[7] = (a_u16Src); \
735 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
736 } while (0)
737#define IEM_MC_BROADCAST_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
738 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
739 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
740 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = (a_u32Src); \
741 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[2] = (a_u32Src); \
742 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[3] = (a_u32Src); \
743 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[0] = (a_u32Src); \
744 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[1] = (a_u32Src); \
745 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[2] = (a_u32Src); \
746 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[3] = (a_u32Src); \
747 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
748 } while (0)
749#define IEM_MC_BROADCAST_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
750 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
751 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
752 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u64Src); \
753 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u64Src); \
754 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u64Src); \
755 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
756 } while (0)
757#define IEM_MC_BROADCAST_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
758 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
759 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
760 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
761 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
762 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
763 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
764 } while (0)
765
766#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
767 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
768#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
769 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
770#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
771 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].au64[0])
772#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
773 do { uintptr_t const iYRegTmp = (a_iYReg); \
774 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
775 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
776 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegTmp); \
777 } while (0)
778
779#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
780 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
781 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
782 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
783 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
784 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
785 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
786 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
787 } while (0)
788#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
789 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
790 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
791 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
792 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
793 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
794 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
795 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
796 } while (0)
797#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
798 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
799 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
800 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
801 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
802 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
803 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
804 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
805 } while (0)
806
807#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
808 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
809 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
810 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
811 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc32Tmp].au32[0]; \
812 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au32[1]; \
813 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
814 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
815 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
816 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
817 } while (0)
818#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
819 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
820 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
821 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
822 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
823 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
824 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
825 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
826 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
827 } while (0)
828#define IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
829 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
830 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
831 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
832 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
833 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[0]; \
834 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
835 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
836 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
837 } while (0)
838#define IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
839 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
840 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
841 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
842 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[1]; \
843 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
844 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
845 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
846 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
847 } while (0)
848#define IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX(a_iYRegDst, a_iYRegSrcHx, a_u64Local) \
849 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
850 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
851 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[0]; \
852 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u64Local); \
853 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
854 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
855 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
856 } while (0)
857#define IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
858 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
859 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
860 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
861 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
862 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
863 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
864 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
865 } while (0)
866
867#ifndef IEM_WITH_SETJMP
868# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
869 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
870# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
871 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
872# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
873 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
874#else
875# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
876 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
877# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
878 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
879# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
880 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
881
882# define IEM_MC_FETCH_MEM_FLAT_U8(a_u8Dst, a_GCPtrMem) \
883 ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
884# define IEM_MC_FETCH_MEM16_FLAT_U8(a_u8Dst, a_GCPtrMem16) \
885 ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem16)))
886# define IEM_MC_FETCH_MEM32_FLAT_U8(a_u8Dst, a_GCPtrMem32) \
887 ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem32)))
888#endif
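/* Illustrative note: under IEM_WITH_SETJMP the ...Jmp fetch workers report
 * faults by longjmp-ing out of the instruction, so the macros expand to plain
 * assignments; without it they return a VBOXSTRICTRC that is checked via
 * IEM_MC_RETURN_ON_FAILURE.  The same MC statement, e.g.
 *
 *      IEM_MC_FETCH_MEM_U8(u8Value, X86_SREG_DS, GCPtrEffSrc);
 *
 * thus compiles to either form depending on the build mode (u8Value and
 * GCPtrEffSrc being locals set up by the decoder). */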
889
890#ifndef IEM_WITH_SETJMP
891# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
892 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
893# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
894 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
895# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
896 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
897#else
898# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
899 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
900# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
901 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
902# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
903 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
904
905# define IEM_MC_FETCH_MEM_FLAT_U16(a_u16Dst, a_GCPtrMem) \
906 ((a_u16Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
907# define IEM_MC_FETCH_MEM_FLAT_U16_DISP(a_u16Dst, a_GCPtrMem, a_offDisp) \
908 ((a_u16Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
909# define IEM_MC_FETCH_MEM_FLAT_I16(a_i16Dst, a_GCPtrMem) \
910 ((a_i16Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
911#endif
912
913#ifndef IEM_WITH_SETJMP
914# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
915 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
916# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
917 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
918# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
919 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
920#else
921# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
922 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
923# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
924 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
925# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
926 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
927
928# define IEM_MC_FETCH_MEM_FLAT_U32(a_u32Dst, a_GCPtrMem) \
929 ((a_u32Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
930# define IEM_MC_FETCH_MEM_FLAT_U32_DISP(a_u32Dst, a_GCPtrMem, a_offDisp) \
931 ((a_u32Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
932# define IEM_MC_FETCH_MEM_FLAT_I32(a_i32Dst, a_GCPtrMem) \
933 ((a_i32Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
934#endif
935
936#ifndef IEM_WITH_SETJMP
937# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
938 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
939# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
940 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
941# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
942 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
943# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
944 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
945#else
946# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
947 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
948# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
949 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
950# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
951 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
952# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
953 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
954
955# define IEM_MC_FETCH_MEM_FLAT_U64(a_u64Dst, a_GCPtrMem) \
956 ((a_u64Dst) = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
957# define IEM_MC_FETCH_MEM_FLAT_U64_DISP(a_u64Dst, a_GCPtrMem, a_offDisp) \
958 ((a_u64Dst) = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
959# define IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128(a_u64Dst, a_GCPtrMem) \
960 ((a_u64Dst) = iemMemFlatFetchDataU64AlignedU128Jmp(pVCpu, (a_GCPtrMem)))
961# define IEM_MC_FETCH_MEM_FLAT_I64(a_i64Dst, a_GCPtrMem) \
962 ((a_i64Dst) = (int64_t)iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
963#endif
964
965#ifndef IEM_WITH_SETJMP
966# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
967 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u, (a_iSeg), (a_GCPtrMem)))
968# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
969 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).u, (a_iSeg), (a_GCPtrMem)))
970# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
971 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
972# define IEM_MC_FETCH_MEM_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \
973 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataD80(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem)))
974#else
975# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
976 ((a_r32Dst).u = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
977# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
978 ((a_r64Dst).u = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
979# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
980 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
981# define IEM_MC_FETCH_MEM_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \
982 iemMemFetchDataD80Jmp(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem))
983
984# define IEM_MC_FETCH_MEM_FLAT_R32(a_r32Dst, a_GCPtrMem) \
985 ((a_r32Dst).u = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
986# define IEM_MC_FETCH_MEM_FLAT_R64(a_r64Dst, a_GCPtrMem) \
987 ((a_r64Dst).u = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
988# define IEM_MC_FETCH_MEM_FLAT_R80(a_r80Dst, a_GCPtrMem) \
989 iemMemFlatFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_GCPtrMem))
990# define IEM_MC_FETCH_MEM_FLAT_D80(a_d80Dst, a_GCPtrMem) \
991 iemMemFlatFetchDataD80Jmp(pVCpu, &(a_d80Dst), (a_GCPtrMem))
992#endif
993
994#ifndef IEM_WITH_SETJMP
995# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
996 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
997# define IEM_MC_FETCH_MEM_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \
998 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128NoAc(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
999# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
1000 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
1001
1002# define IEM_MC_FETCH_MEM_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \
1003 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)))
1004# define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \
1005 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)))
1006# define IEM_MC_FETCH_MEM_XMM_U32(a_XmmDst, a_iDWord, a_iSeg, a_GCPtrMem) \
1007 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_XmmDst).au32[(a_iDWord)], (a_iSeg), (a_GCPtrMem)))
1008# define IEM_MC_FETCH_MEM_XMM_U64(a_XmmDst, a_iQWord, a_iSeg, a_GCPtrMem) \
1009 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_XmmDst).au64[(a_iQWord)], (a_iSeg), (a_GCPtrMem)))
1010
1011# define IEM_MC_FETCH_MEM_U128_NO_AC_AND_XREG_U128(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1012 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128NoAc(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2))); \
1013 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1014 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1015 } while (0)
1016
1017# define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1018 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_Dst).uSrc2.uXmm, (a_iSeg2), (a_GCPtrMem2))); \
1019 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1020 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1021 } while (0)
1022
1023# define IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_iSeg2, a_GCPtrMem2) do { \
1024 (a_Dst).uSrc2.uXmm.au64[0] = 0; \
1025 (a_Dst).uSrc2.uXmm.au64[1] = 0; \
1026 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_Dst).uSrc2.uXmm.au32[(a_iDWord2)], (a_iSeg2), (a_GCPtrMem2))); \
1027 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1028 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1029 } while (0)
1030
1031# define IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_iSeg2, a_GCPtrMem2) do { \
1032 (a_Dst).uSrc2.uXmm.au64[1] = 0; \
1033 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_Dst).uSrc2.uXmm.au64[(a_iQWord2)], (a_iSeg2), (a_GCPtrMem2))); \
1034 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1035 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1036 } while (0)
1037
1038# define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1039 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2))); \
1040 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1041 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1042 (a_Dst).u64Rax = pVCpu->cpum.GstCtx.rax; \
1043 (a_Dst).u64Rdx = pVCpu->cpum.GstCtx.rdx; \
1044 } while (0)
1045# define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1046 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2))); \
1047 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1048 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1049 (a_Dst).u64Rax = (int64_t)(int32_t)pVCpu->cpum.GstCtx.eax; \
1050 (a_Dst).u64Rdx = (int64_t)(int32_t)pVCpu->cpum.GstCtx.edx; \
1051 } while (0)
1052
1053#else
1054# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
1055 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
1056# define IEM_MC_FETCH_MEM_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \
1057 iemMemFetchDataU128NoAcJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
1058# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
1059 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
1060
1061# define IEM_MC_FETCH_MEM_XMM(a_XmmDst, a_iSeg, a_GCPtrMem) \
1062 iemMemFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
1063# define IEM_MC_FETCH_MEM_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \
1064 iemMemFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
1065# define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \
1066 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
1067# define IEM_MC_FETCH_MEM_XMM_U32(a_XmmDst, a_iDWord, a_iSeg, a_GCPtrMem) \
1068 (a_XmmDst).au32[(a_iDWord)] = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))
1069# define IEM_MC_FETCH_MEM_XMM_U64(a_XmmDst, a_iQWord, a_iSeg, a_GCPtrMem) \
1070 (a_XmmDst).au64[(a_iQWord)] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))
1071
1072# define IEM_MC_FETCH_MEM_FLAT_U128(a_u128Dst, a_GCPtrMem) \
1073 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
1074# define IEM_MC_FETCH_MEM_FLAT_U128_NO_AC(a_u128Dst, a_GCPtrMem) \
1075 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
1076# define IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE(a_u128Dst, a_GCPtrMem) \
1077 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), UINT8_MAX, (a_GCPtrMem))
1078
1079# define IEM_MC_FETCH_MEM_FLAT_XMM(a_XmmDst, a_GCPtrMem) \
1080 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem))
1081# define IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC(a_XmmDst, a_GCPtrMem) \
1082 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem))
1083# define IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE(a_XmmDst, a_GCPtrMem) \
1084 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_XmmDst).uXmm, UINT8_MAX, (a_GCPtrMem))
1085# define IEM_MC_FETCH_MEM_FLAT_XMM_U32(a_XmmDst, a_iDWord, a_GCPtrMem) \
1086 (a_XmmDst).au32[(a_iDWord)] = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem))
1087# define IEM_MC_FETCH_MEM_FLAT_XMM_U64(a_XmmDst, a_iQWord, a_GCPtrMem) \
1088 (a_XmmDst).au64[(a_iQWord)] = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem))
1089
1090# define IEM_MC_FETCH_MEM_U128_AND_XREG_U128(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1091 iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \
1092 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1093 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1094 } while (0)
1095# define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
1096 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \
1097 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1098 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1099 } while (0)
1100
1101# define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1102 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_iSeg2), (a_GCPtrMem2)); \
1103 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1104 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1105 } while (0)
1106# define IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
1107 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_Dst).uSrc2.uXmm, UINT8_MAX, (a_GCPtrMem2)); \
1108 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1109 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1110 } while (0)
1111
1112# define IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_iSeg2, a_GCPtrMem2) do { \
1113 (a_Dst).uSrc2.uXmm.au64[0] = 0; \
1114 (a_Dst).uSrc2.uXmm.au64[1] = 0; \
1115 (a_Dst).uSrc2.uXmm.au32[(a_iDWord2)] = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg2), (a_GCPtrMem2)); \
1116 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1117 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1118 } while (0)
1119# define IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_GCPtrMem2) do { \
1120 (a_Dst).uSrc2.uXmm.au64[0] = 0; \
1121 (a_Dst).uSrc2.uXmm.au64[1] = 0; \
1122 (a_Dst).uSrc2.uXmm.au32[(a_iDWord2)] = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem2)); \
1123 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1124 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1125 } while (0)
1126
1127# define IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_iSeg2, a_GCPtrMem2) do { \
1128 (a_Dst).uSrc2.uXmm.au64[!(a_iQWord2)] = 0; \
1129 (a_Dst).uSrc2.uXmm.au64[(a_iQWord2)] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg2), (a_GCPtrMem2)); \
1130 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1131 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1132 } while (0)
1133# define IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_GCPtrMem2) do { \
1134 (a_Dst).uSrc2.uXmm.au64[1] = 0; \
1135 (a_Dst).uSrc2.uXmm.au64[(a_iQWord2)] = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem2)); \
1136 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1137 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1138 } while (0)
1139
1140
1141# define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1142 iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \
1143 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1144 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1145 (a_Dst).u64Rax = pVCpu->cpum.GstCtx.rax; \
1146 (a_Dst).u64Rdx = pVCpu->cpum.GstCtx.rdx; \
1147 } while (0)
1148# define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1149 iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \
1150 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1151 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1152 (a_Dst).u64Rax = (int64_t)(int32_t)pVCpu->cpum.GstCtx.eax; \
1153 (a_Dst).u64Rdx = (int64_t)(int32_t)pVCpu->cpum.GstCtx.edx; \
1154 } while (0)
1155
1156# define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
1157 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \
1158 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1159 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1160 (a_Dst).u64Rax = pVCpu->cpum.GstCtx.rax; \
1161 (a_Dst).u64Rdx = pVCpu->cpum.GstCtx.rdx; \
1162 } while (0)
1163# define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
1164 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \
1165 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1166 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1167 (a_Dst).u64Rax = (int64_t)(int32_t)pVCpu->cpum.GstCtx.eax; \
1168 (a_Dst).u64Rdx = (int64_t)(int32_t)pVCpu->cpum.GstCtx.edx; \
1169 } while (0)
1170
1171#endif
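/*
 * Editorial note (not part of the original header): the ..._AND_XREG_...
 * variants above gather both operands of a two-operand SSE-style instruction
 * in one go: the memory operand lands in the caller-supplied structure's
 * uSrc2 member, the XMM register a_iXReg1 is copied into uSrc1, and the
 * RAX/RDX variants additionally capture the accumulator pair. A purely
 * illustrative caller-side shape (the real structure types are defined
 * elsewhere and may differ):
 *
 *     struct { RTUINT128U uSrc1, uSrc2; } Src;   // hypothetical placeholder type
 *     IEM_MC_FETCH_MEM_U128_AND_XREG_U128(Src, iXRegDst, iEffSeg, GCPtrEffSrc);
 *     // Src.uSrc1 == guest XMM register contents, Src.uSrc2 == 16 bytes from memory.
 */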
1172
1173#ifndef IEM_WITH_SETJMP
1174# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
1175 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
1176# define IEM_MC_FETCH_MEM_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \
1177 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
1178# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
1179 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
1180
1181# define IEM_MC_FETCH_MEM_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \
1182 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
1183# define IEM_MC_FETCH_MEM_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \
1184 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
1185# define IEM_MC_FETCH_MEM_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \
1186 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
1187#else
1188# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
1189 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
1190# define IEM_MC_FETCH_MEM_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \
1191 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
1192# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
1193 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
1194
1195# define IEM_MC_FETCH_MEM_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \
1196 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
1197# define IEM_MC_FETCH_MEM_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \
1198 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
1199# define IEM_MC_FETCH_MEM_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \
1200 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
1201
1202# define IEM_MC_FETCH_MEM_FLAT_U256(a_u256Dst, a_GCPtrMem) \
1203 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), UINT8_MAX, (a_GCPtrMem))
1204# define IEM_MC_FETCH_MEM_FLAT_U256_NO_AC(a_u256Dst, a_GCPtrMem) \
1205 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), UINT8_MAX, (a_GCPtrMem))
1206# define IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX(a_u256Dst, a_GCPtrMem) \
1207 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), UINT8_MAX, (a_GCPtrMem))
1208
1209# define IEM_MC_FETCH_MEM_FLAT_YMM(a_YmmDst, a_GCPtrMem) \
1210 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, UINT8_MAX, (a_GCPtrMem))
1211# define IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC(a_YmmDst, a_GCPtrMem) \
1212 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, UINT8_MAX, (a_GCPtrMem))
1213# define IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX(a_YmmDst, a_GCPtrMem) \
1214 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_YmmDst).ymm, UINT8_MAX, (a_GCPtrMem))
1215#endif
1216
1217
1218
1219#ifndef IEM_WITH_SETJMP
1220# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
1221 do { \
1222 uint8_t u8Tmp; \
1223 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1224 (a_u16Dst) = u8Tmp; \
1225 } while (0)
1226# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1227 do { \
1228 uint8_t u8Tmp; \
1229 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1230 (a_u32Dst) = u8Tmp; \
1231 } while (0)
1232# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1233 do { \
1234 uint8_t u8Tmp; \
1235 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1236 (a_u64Dst) = u8Tmp; \
1237 } while (0)
1238# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1239 do { \
1240 uint16_t u16Tmp; \
1241 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
1242 (a_u32Dst) = u16Tmp; \
1243 } while (0)
1244# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1245 do { \
1246 uint16_t u16Tmp; \
1247 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
1248 (a_u64Dst) = u16Tmp; \
1249 } while (0)
1250# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1251 do { \
1252 uint32_t u32Tmp; \
1253 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
1254 (a_u64Dst) = u32Tmp; \
1255 } while (0)
1256#else /* IEM_WITH_SETJMP */
1257# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
1258 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1259# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1260 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1261# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1262 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1263# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1264 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1265# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1266 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1267# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1268 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1269
1270# define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16(a_u16Dst, a_GCPtrMem) \
1271 ((a_u16Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1272# define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32(a_u32Dst, a_GCPtrMem) \
1273 ((a_u32Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1274# define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64(a_u64Dst, a_GCPtrMem) \
1275 ((a_u64Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1276# define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32(a_u32Dst, a_GCPtrMem) \
1277 ((a_u32Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1278# define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64(a_u64Dst, a_GCPtrMem) \
1279 ((a_u64Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1280# define IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64(a_u64Dst, a_GCPtrMem) \
1281 ((a_u64Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
1282#endif /* IEM_WITH_SETJMP */
1283
1284#ifndef IEM_WITH_SETJMP
1285# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
1286 do { \
1287 uint8_t u8Tmp; \
1288 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1289 (a_u16Dst) = (int8_t)u8Tmp; \
1290 } while (0)
1291# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1292 do { \
1293 uint8_t u8Tmp; \
1294 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1295 (a_u32Dst) = (int8_t)u8Tmp; \
1296 } while (0)
1297# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1298 do { \
1299 uint8_t u8Tmp; \
1300 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1301 (a_u64Dst) = (int8_t)u8Tmp; \
1302 } while (0)
1303# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1304 do { \
1305 uint16_t u16Tmp; \
1306 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
1307 (a_u32Dst) = (int16_t)u16Tmp; \
1308 } while (0)
1309# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1310 do { \
1311 uint16_t u16Tmp; \
1312 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
1313 (a_u64Dst) = (int16_t)u16Tmp; \
1314 } while (0)
1315# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1316 do { \
1317 uint32_t u32Tmp; \
1318 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
1319 (a_u64Dst) = (int32_t)u32Tmp; \
1320 } while (0)
1321#else /* IEM_WITH_SETJMP */
1322# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
1323 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1324# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1325 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1326# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1327 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1328# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1329 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1330# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1331 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1332# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1333 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1334
1335# define IEM_MC_FETCH_MEM_FLAT_U8_SX_U16(a_u16Dst, a_GCPtrMem) \
1336 ((a_u16Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1337# define IEM_MC_FETCH_MEM_FLAT_U8_SX_U32(a_u32Dst, a_GCPtrMem) \
1338 ((a_u32Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1339# define IEM_MC_FETCH_MEM_FLAT_U8_SX_U64(a_u64Dst, a_GCPtrMem) \
1340 ((a_u64Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1341# define IEM_MC_FETCH_MEM_FLAT_U16_SX_U32(a_u32Dst, a_GCPtrMem) \
1342 ((a_u32Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1343# define IEM_MC_FETCH_MEM_FLAT_U16_SX_U64(a_u64Dst, a_GCPtrMem) \
1344 ((a_u64Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1345# define IEM_MC_FETCH_MEM_FLAT_U32_SX_U64(a_u64Dst, a_GCPtrMem) \
1346 ((a_u64Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
1347#endif /* IEM_WITH_SETJMP */
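/*
 * Editorial note (not part of the original header): the _ZX_ and _SX_ fetch
 * families differ only in how the narrower value is widened. The zero
 * extending forms assign the unsigned fetch result directly, while the sign
 * extending forms cast through the signed type of the source width first, so
 * for example a byte of 0x80 lands in a 64-bit destination as
 * 0x0000000000000080 with IEM_MC_FETCH_MEM_U8_ZX_U64 but as
 * 0xFFFFFFFFFFFFFF80 with IEM_MC_FETCH_MEM_U8_SX_U64.
 */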
1348
1349#ifndef IEM_WITH_SETJMP
1350# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
1351 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
1352# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
1353 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
1354# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
1355 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
1356# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
1357 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
1358#else
1359# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
1360 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
1361# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
1362 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
1363# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
1364 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
1365# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
1366 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
1367
1368# define IEM_MC_STORE_MEM_FLAT_U8(a_GCPtrMem, a_u8Value) \
1369 iemMemFlatStoreDataU8Jmp(pVCpu, (a_GCPtrMem), (a_u8Value))
1370# define IEM_MC_STORE_MEM_FLAT_U16(a_GCPtrMem, a_u16Value) \
1371 iemMemFlatStoreDataU16Jmp(pVCpu, (a_GCPtrMem), (a_u16Value))
1372# define IEM_MC_STORE_MEM_FLAT_U32(a_GCPtrMem, a_u32Value) \
1373 iemMemFlatStoreDataU32Jmp(pVCpu, (a_GCPtrMem), (a_u32Value))
1374# define IEM_MC_STORE_MEM_FLAT_U64(a_GCPtrMem, a_u64Value) \
1375 iemMemFlatStoreDataU64Jmp(pVCpu, (a_GCPtrMem), (a_u64Value))
1376#endif
1377
1378#ifndef IEM_WITH_SETJMP
1379# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
1380 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
1381# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
1382 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
1383# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
1384 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
1385# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
1386 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
1387#else
1388# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
1389 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
1390# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
1391 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
1392# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
1393 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
1394# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
1395 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
1396
1397# define IEM_MC_STORE_MEM_FLAT_U8_CONST(a_GCPtrMem, a_u8C) \
1398 iemMemFlatStoreDataU8Jmp(pVCpu, (a_GCPtrMem), (a_u8C))
1399# define IEM_MC_STORE_MEM_FLAT_U16_CONST(a_GCPtrMem, a_u16C) \
1400 iemMemFlatStoreDataU16Jmp(pVCpu, (a_GCPtrMem), (a_u16C))
1401# define IEM_MC_STORE_MEM_FLAT_U32_CONST(a_GCPtrMem, a_u32C) \
1402 iemMemFlatStoreDataU32Jmp(pVCpu, (a_GCPtrMem), (a_u32C))
1403# define IEM_MC_STORE_MEM_FLAT_U64_CONST(a_GCPtrMem, a_u64C) \
1404 iemMemFlatStoreDataU64Jmp(pVCpu, (a_GCPtrMem), (a_u64C))
1405#endif
1406
1407#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
1408#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
1409#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
1410#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
1411#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u = UINT32_C(0xffc00000)
1412#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->u = UINT64_C(0xfff8000000000000)
1413#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
1414 do { \
1415 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
1416 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
1417 } while (0)
1418#define IEM_MC_STORE_MEM_INDEF_D80_BY_REF(a_pd80Dst) \
1419 do { \
1420 (a_pd80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
1421 (a_pd80Dst)->au16[4] = UINT16_C(0xffff); \
1422 } while (0)
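/*
 * Editorial note (not part of the original header): the QNaN/indefinite
 * constants above are the standard "indefinite" encodings. 0xffc00000 is a
 * single-precision value with sign=1, all-ones exponent and the top fraction
 * bit set; 0xfff8000000000000 is the same layout for double precision. For
 * the 80-bit cases, the mantissa 0xc000000000000000 (integer bit plus quiet
 * bit) together with au16[4]=0xffff (sign plus all-ones exponent) yields the
 * extended-precision real indefinite, and the packed-BCD indefinite shares
 * that same bit pattern.
 */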
1423
1424#ifndef IEM_WITH_SETJMP
1425# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
1426 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value)))
1427# define IEM_MC_STORE_MEM_U128_NO_AC(a_iSeg, a_GCPtrMem, a_u128Value) \
1428 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128NoAc(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value)))
1429# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
1430 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
1431#else
1432# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
1433 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value))
1434# define IEM_MC_STORE_MEM_U128_NO_AC(a_iSeg, a_GCPtrMem, a_u128Value) \
1435 iemMemStoreDataU128NoAcJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value))
1436# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
1437 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
1438
1439# define IEM_MC_STORE_MEM_FLAT_U128(a_GCPtrMem, a_u128Value) \
1440 iemMemFlatStoreDataU128Jmp(pVCpu, (a_GCPtrMem), &(a_u128Value))
1441# define IEM_MC_STORE_MEM_FLAT_U128_NO_AC(a_GCPtrMem, a_u128Value) \
1442 iemMemFlatStoreDataU128NoAcJmp(pVCpu, (a_GCPtrMem), &(a_u128Value))
1443# define IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE(a_GCPtrMem, a_u128Value) \
1444 iemMemStoreDataU128AlignedSseJmp(pVCpu, UINT8_MAX, (a_GCPtrMem), (a_u128Value))
1445#endif
1446
1447#ifndef IEM_WITH_SETJMP
1448# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
1449 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
1450# define IEM_MC_STORE_MEM_U256_NO_AC(a_iSeg, a_GCPtrMem, a_u256Value) \
1451 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256NoAc(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
1452# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
1453 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
1454#else
1455# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
1456 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
1457# define IEM_MC_STORE_MEM_U256_NO_AC(a_iSeg, a_GCPtrMem, a_u256Value) \
1458 iemMemStoreDataU256NoAcJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
1459# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
1460 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
1461
1462# define IEM_MC_STORE_MEM_FLAT_U256(a_GCPtrMem, a_u256Value) \
1463 iemMemStoreDataU256Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem), &(a_u256Value))
1464# define IEM_MC_STORE_MEM_FLAT_U256_NO_AC(a_GCPtrMem, a_u256Value) \
1465 iemMemStoreDataU256NoAcJmp(pVCpu, UINT8_MAX, (a_GCPtrMem), &(a_u256Value))
1466# define IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX(a_GCPtrMem, a_u256Value) \
1467 iemMemStoreDataU256AlignedAvxJmp(pVCpu, UINT8_MAX, (a_GCPtrMem), &(a_u256Value))
1468#endif
1469
1470/* Regular stack push and pop: */
1471#ifndef IEM_WITH_SETJMP
1472# define IEM_MC_PUSH_U16(a_u16Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
1473# define IEM_MC_PUSH_U32(a_u32Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
1474# define IEM_MC_PUSH_U32_SREG(a_uSegVal) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_uSegVal)))
1475# define IEM_MC_PUSH_U64(a_u64Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
1476
1477# define IEM_MC_POP_GREG_U16(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU16(pVCpu, (a_iGReg)))
1478# define IEM_MC_POP_GREG_U32(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU32(pVCpu, (a_iGReg)))
1479# define IEM_MC_POP_GREG_U64(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU64(pVCpu, (a_iGReg)))
1480#else
1481# define IEM_MC_PUSH_U16(a_u16Value) iemMemStackPushU16Jmp(pVCpu, (a_u16Value))
1482# define IEM_MC_PUSH_U32(a_u32Value) iemMemStackPushU32Jmp(pVCpu, (a_u32Value))
1483# define IEM_MC_PUSH_U32_SREG(a_uSegVal) iemMemStackPushU32SRegJmp(pVCpu, (a_uSegVal))
1484# define IEM_MC_PUSH_U64(a_u64Value) iemMemStackPushU64Jmp(pVCpu, (a_u64Value))
1485
1486# define IEM_MC_POP_GREG_U16(a_iGReg) iemMemStackPopGRegU16Jmp(pVCpu, (a_iGReg))
1487# define IEM_MC_POP_GREG_U32(a_iGReg) iemMemStackPopGRegU32Jmp(pVCpu, (a_iGReg))
1488# define IEM_MC_POP_GREG_U64(a_iGReg) iemMemStackPopGRegU64Jmp(pVCpu, (a_iGReg))
1489#endif
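/*
 * Editorial usage sketch (not part of the original header): pushing a word
 * from an instruction body. IEM_MC_LOCAL and IEM_MC_FETCH_GREG_U16 are
 * assumed to be defined elsewhere in this file, X86_GREG_xAX is the usual
 * general register index constant, and the flag arguments to IEM_MC_BEGIN
 * are placeholders.
 *
 *     IEM_MC_BEGIN(0, 1, 0, 0);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *     IEM_MC_PUSH_U16(u16Value);
 *     IEM_MC_ADVANCE_RIP_AND_FINISH();
 *     IEM_MC_END();
 */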
1490
1491/* 32-bit flat stack push and pop: */
1492#ifndef IEM_WITH_SETJMP
1493# define IEM_MC_FLAT32_PUSH_U16(a_u16Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
1494# define IEM_MC_FLAT32_PUSH_U32(a_u32Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
1495# define IEM_MC_FLAT32_PUSH_U32_SREG(a_uSegVal) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_uSegVal)))
1496
1497# define IEM_MC_FLAT32_POP_GREG_U16(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU16(pVCpu, (a_iGReg)))
1498# define IEM_MC_FLAT32_POP_GREG_U32(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU32(pVCpu, (a_iGReg)))
1499#else
1500# define IEM_MC_FLAT32_PUSH_U16(a_u16Value) iemMemFlat32StackPushU16Jmp(pVCpu, (a_u16Value))
1501# define IEM_MC_FLAT32_PUSH_U32(a_u32Value) iemMemFlat32StackPushU32Jmp(pVCpu, (a_u32Value))
1502# define IEM_MC_FLAT32_PUSH_U32_SREG(a_uSegVal) iemMemFlat32StackPushU32SRegJmp(pVCpu, (a_uSegVal))
1503
1504# define IEM_MC_FLAT32_POP_GREG_U16(a_iGReg) iemMemFlat32StackPopGRegU16Jmp(pVCpu, (a_iGReg))
1505# define IEM_MC_FLAT32_POP_GREG_U32(a_iGReg) iemMemFlat32StackPopGRegU32Jmp(pVCpu, (a_iGReg))
1506#endif
1507
1508/* 64-bit flat stack push and pop: */
1509#ifndef IEM_WITH_SETJMP
1510# define IEM_MC_FLAT64_PUSH_U16(a_u16Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
1511# define IEM_MC_FLAT64_PUSH_U64(a_u64Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
1512
1513# define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU16(pVCpu, (a_iGReg)))
1514# define IEM_MC_FLAT64_POP_GREG_U64(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU64(pVCpu, (a_iGReg)))
1515#else
1516# define IEM_MC_FLAT64_PUSH_U16(a_u16Value) iemMemFlat64StackPushU16Jmp(pVCpu, (a_u16Value))
1517# define IEM_MC_FLAT64_PUSH_U64(a_u64Value) iemMemFlat64StackPushU64Jmp(pVCpu, (a_u64Value))
1518
1519# define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg) iemMemFlat64StackPopGRegU16Jmp(pVCpu, (a_iGReg))
1520# define IEM_MC_FLAT64_POP_GREG_U64(a_iGReg) iemMemFlat64StackPopGRegU64Jmp(pVCpu, (a_iGReg))
1521#endif
1522
1523
1524/* 8-bit */
1525
1526/**
1527 * Maps guest memory for byte atomic read+write direct (or bounce) buffer
1528 * access, for atomic operations.
1529 *
1530 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1531 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1532 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1533 * @param[in] a_GCPtrMem The memory address.
1534 * @remarks Will return/long jump on errors.
1535 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1536 */
1537#ifndef IEM_WITH_SETJMP
1538# define IEM_MC_MEM_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1539 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \
1540 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, 0))
1541#else
1542# define IEM_MC_MEM_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1543 (a_pu8Mem) = iemMemMapDataU8AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1544#endif
1545
1546/**
1547 * Maps guest memory for byte read+write direct (or bounce) buffer access.
1548 *
1549 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1550 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1551 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1552 * @param[in] a_GCPtrMem The memory address.
1553 * @remarks Will return/long jump on errors.
1554 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1555 */
1556#ifndef IEM_WITH_SETJMP
1557# define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1558 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \
1559 (a_GCPtrMem), IEM_ACCESS_DATA_RW, 0))
1560#else
1561# define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1562 (a_pu8Mem) = iemMemMapDataU8RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1563#endif
1564
1565/**
1566 * Maps guest memory for byte writeonly direct (or bounce) buffer access.
1567 *
1568 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1569 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1570 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1571 * @param[in] a_GCPtrMem The memory address.
1572 * @remarks Will return/long jump on errors.
1573 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1574 */
1575#ifndef IEM_WITH_SETJMP
1576# define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1577 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \
1578 (a_GCPtrMem), IEM_ACCESS_DATA_W, 0))
1579#else
1580# define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1581 (a_pu8Mem) = iemMemMapDataU8WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1582#endif
1583
1584/**
1585 * Maps guest memory for byte readonly direct (or bounce) buffer access.
1586 *
1587 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1588 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1589 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1590 * @param[in] a_GCPtrMem The memory address.
1591 * @remarks Will return/long jump on errors.
1592 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1593 */
1594#ifndef IEM_WITH_SETJMP
1595# define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1596 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \
1597 (a_GCPtrMem), IEM_ACCESS_DATA_R, 0))
1598#else
1599# define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1600 (a_pu8Mem) = iemMemMapDataU8RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1601#endif
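/*
 * Editorial usage sketch (not part of the original header): the map macros
 * pair with the IEM_MC_MEM_COMMIT_AND_UNMAP_* family referenced in the @see
 * tags above. A locked read-modify-write of a byte would look roughly like
 * the following; the local-declaration MCs and the commit macro's exact
 * parameters are assumed from elsewhere in this file:
 *
 *     IEM_MC_LOCAL(uint8_t, bUnmapInfo);
 *     IEM_MC_LOCAL(uint8_t *, pu8Dst);
 *     IEM_MC_MEM_MAP_U8_ATOMIC(pu8Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
 *     ... modify *pu8Dst ...
 *     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo);
 */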
1602
1603/**
1604 * Maps guest memory for byte atomic read+write direct (or bounce) buffer
1605 * access, flat address variant.
1606 *
1607 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1608 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1609 * @param[in] a_GCPtrMem The memory address.
1610 * @remarks Will return/long jump on errors.
1611 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1612 */
1613#ifndef IEM_WITH_SETJMP
1614# define IEM_MC_MEM_FLAT_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1615 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \
1616 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, 0))
1617#else
1618# define IEM_MC_MEM_FLAT_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1619 (a_pu8Mem) = iemMemFlatMapDataU8AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1620#endif
1621
1622/**
1623 * Maps guest memory for byte read+write direct (or bounce) buffer access, flat
1624 * address variant.
1625 *
1626 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1627 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1628 * @param[in] a_GCPtrMem The memory address.
1629 * @remarks Will return/long jump on errors.
1630 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1631 */
1632#ifndef IEM_WITH_SETJMP
1633# define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1634 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \
1635 (a_GCPtrMem), IEM_ACCESS_DATA_RW, 0))
1636#else
1637# define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1638 (a_pu8Mem) = iemMemFlatMapDataU8RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1639#endif
1640
1641/**
1642 * Maps guest memory for byte writeonly direct (or bounce) buffer access, flat
1643 * address variant.
1644 *
1645 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1646 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1647 * @param[in] a_GCPtrMem The memory address.
1648 * @remarks Will return/long jump on errors.
1649 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1650 */
1651#ifndef IEM_WITH_SETJMP
1652# define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1653 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \
1654 (a_GCPtrMem), IEM_ACCESS_DATA_W, 0))
1655#else
1656# define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1657 (a_pu8Mem) = iemMemFlatMapDataU8WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1658#endif
1659
1660/**
1661 * Maps guest memory for byte readonly direct (or bounce) buffer access, flat
1662 * address variant.
1663 *
1664 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1665 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1666 * @param[in] a_GCPtrMem The memory address.
1667 * @remarks Will return/long jump on errors.
1668 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1669 */
1670#ifndef IEM_WITH_SETJMP
1671# define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1672 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \
1673 (a_GCPtrMem), IEM_ACCESS_DATA_R, 0))
1674#else
1675# define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1676 (a_pu8Mem) = iemMemFlatMapDataU8RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1677#endif
1678
1679
1680/* 16-bit */
1681
1682/**
1683 * Maps guest memory for word atomic read+write direct (or bounce) buffer access.
1684 *
1685 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1686 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1687 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1688 * @param[in] a_GCPtrMem The memory address.
1689 * @remarks Will return/long jump on errors.
1690 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1691 */
1692#ifndef IEM_WITH_SETJMP
1693# define IEM_MC_MEM_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1694 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \
1695 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint16_t) - 1))
1696#else
1697# define IEM_MC_MEM_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1698 (a_pu16Mem) = iemMemMapDataU16AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1699#endif
1700
1701/**
1702 * Maps guest memory for word read+write direct (or bounce) buffer access.
1703 *
1704 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1705 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1706 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1707 * @param[in] a_GCPtrMem The memory address.
1708 * @remarks Will return/long jump on errors.
1709 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1710 */
1711#ifndef IEM_WITH_SETJMP
1712# define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1713 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \
1714 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1))
1715#else
1716# define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1717 (a_pu16Mem) = iemMemMapDataU16RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1718#endif
1719
1720/**
1721 * Maps guest memory for word writeonly direct (or bounce) buffer access.
1722 *
1723 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1724 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1725 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1726 * @param[in] a_GCPtrMem The memory address.
1727 * @remarks Will return/long jump on errors.
1728 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1729 */
1730#ifndef IEM_WITH_SETJMP
1731# define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1732 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \
1733 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1))
1734#else
1735# define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1736 (a_pu16Mem) = iemMemMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1737#endif
1738
1739/**
1740 * Maps guest memory for word readonly direct (or bounce) buffer access.
1741 *
1742 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1743 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1744 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1745 * @param[in] a_GCPtrMem The memory address.
1746 * @remarks Will return/long jump on errors.
1747 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1748 */
1749#ifndef IEM_WITH_SETJMP
1750# define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1751 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \
1752 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1))
1753#else
1754# define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1755 (a_pu16Mem) = iemMemMapDataU16RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1756#endif
1757
1758/**
1759 * Maps guest memory for word atomic read+write direct (or bounce) buffer
1760 * access, flat address variant.
1761 *
1762 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1763 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1764 * @param[in] a_GCPtrMem The memory address.
1765 * @remarks Will return/long jump on errors.
1766 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1767 */
1768#ifndef IEM_WITH_SETJMP
1769# define IEM_MC_MEM_FLAT_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1770 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \
1771 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint16_t) - 1))
1772#else
1773# define IEM_MC_MEM_FLAT_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1774 (a_pu16Mem) = iemMemFlatMapDataU16AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1775#endif
1776
1777/**
1778 * Maps guest memory for word read+write direct (or bounce) buffer access, flat
1779 * address variant.
1780 *
1781 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1782 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1783 * @param[in] a_GCPtrMem The memory address.
1784 * @remarks Will return/long jump on errors.
1785 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1786 */
1787#ifndef IEM_WITH_SETJMP
1788# define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1789 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \
1790 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1))
1791#else
1792# define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1793 (a_pu16Mem) = iemMemFlatMapDataU16RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1794#endif
1795
1796/**
1797 * Maps guest memory for word writeonly direct (or bounce) buffer access, flat
1798 * address variant.
1799 *
1800 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1801 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1802 * @param[in] a_GCPtrMem The memory address.
1803 * @remarks Will return/long jump on errors.
1804 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1805 */
1806#ifndef IEM_WITH_SETJMP
1807# define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1808 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \
1809 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1))
1810#else
1811# define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1812 (a_pu16Mem) = iemMemFlatMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1813#endif
1814
1815/**
1816 * Maps guest memory for word readonly direct (or bounce) buffer access, flat
1817 * address variant.
1818 *
1819 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1820 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1821 * @param[in] a_GCPtrMem The memory address.
1822 * @remarks Will return/long jump on errors.
1823 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1824 */
1825#ifndef IEM_WITH_SETJMP
1826# define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1827 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \
1828 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1))
1829#else
1830# define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1831 (a_pu16Mem) = iemMemFlatMapDataU16RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1832#endif
1833
1834/** int16_t alias. */
1835#ifndef IEM_WITH_SETJMP
1836# define IEM_MC_MEM_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1837 IEM_MC_MEM_MAP_U16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
1838#else
1839# define IEM_MC_MEM_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1840 (a_pi16Mem) = (int16_t *)iemMemMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1841#endif
1842
1843/** Flat int16_t alias. */
1844#ifndef IEM_WITH_SETJMP
1845# define IEM_MC_MEM_FLAT_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem) \
1846 IEM_MC_MEM_FLAT_MAP_U16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem)
1847#else
1848# define IEM_MC_MEM_FLAT_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem) \
1849 (a_pi16Mem) = (int16_t *)iemMemFlatMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1850#endif
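/*
 * Editorial usage sketch (not part of the original header): the int16_t
 * aliases are for stores that naturally produce a signed value, such as an
 * x87 FIST-style store. A hedged outline, with the local-declaration and
 * commit MCs assumed from elsewhere in this file:
 *
 *     IEM_MC_LOCAL(uint8_t, bUnmapInfo);
 *     IEM_MC_LOCAL(int16_t *, pi16Dst);
 *     IEM_MC_MEM_MAP_I16_WO(pi16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
 *     ... write the converted value through pi16Dst ...
 *     IEM_MC_MEM_COMMIT_AND_UNMAP_WO(bUnmapInfo);
 */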
1851
1852
1853/* 32-bit */
1854
1855/**
1856 * Maps guest memory for dword atomic read+write direct (or bounce) buffer access.
1857 *
1858 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1859 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1860 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1861 * @param[in] a_GCPtrMem The memory address.
1862 * @remarks Will return/long jump on errors.
1863 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1864 */
1865#ifndef IEM_WITH_SETJMP
1866# define IEM_MC_MEM_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1867 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \
1868 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint32_t) - 1))
1869#else
1870# define IEM_MC_MEM_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1871 (a_pu32Mem) = iemMemMapDataU32AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1872#endif
1873
1874/**
1875 * Maps guest memory for dword read+write direct (or bounce) buffer access.
1876 *
1877 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1878 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1879 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1880 * @param[in] a_GCPtrMem The memory address.
1881 * @remarks Will return/long jump on errors.
1882 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1883 */
1884#ifndef IEM_WITH_SETJMP
1885# define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1886 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \
1887 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1))
1888#else
1889# define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1890 (a_pu32Mem) = iemMemMapDataU32RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1891#endif
1892
1893/**
1894 * Maps guest memory for dword writeonly direct (or bounce) buffer access.
1895 *
1896 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1897 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1898 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1899 * @param[in] a_GCPtrMem The memory address.
1900 * @remarks Will return/long jump on errors.
1901 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1902 */
1903#ifndef IEM_WITH_SETJMP
1904# define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1905 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \
1906 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1))
1907#else
1908# define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1909 (a_pu32Mem) = iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1910#endif
1911
1912/**
1913 * Maps guest memory for dword readonly direct (or bounce) buffer access.
1914 *
1915 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1916 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1917 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1918 * @param[in] a_GCPtrMem The memory address.
1919 * @remarks Will return/long jump on errors.
1920 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1921 */
1922#ifndef IEM_WITH_SETJMP
1923# define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1924 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \
1925 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1))
1926#else
1927# define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1928 (a_pu32Mem) = iemMemMapDataU32RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1929#endif
1930
1931/**
1932 * Maps guest memory for dword atomic read+write direct (or bounce) buffer
1933 * access, flat address variant.
1934 *
1935 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1936 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1937 * @param[in] a_GCPtrMem The memory address.
1938 * @remarks Will return/long jump on errors.
1939 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1940 */
1941#ifndef IEM_WITH_SETJMP
1942# define IEM_MC_MEM_FLAT_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
1943 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \
1944 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint32_t) - 1))
1945#else
1946# define IEM_MC_MEM_FLAT_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
1947 (a_pu32Mem) = iemMemFlatMapDataU32AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1948#endif
1949
1950/**
1951 * Maps guest memory for dword read+write direct (or bounce) buffer access,
1952 * flat address variant.
1953 *
1954 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1955 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1956 * @param[in] a_GCPtrMem The memory address.
1957 * @remarks Will return/long jump on errors.
1958 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1959 */
1960#ifndef IEM_WITH_SETJMP
1961# define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
1962 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \
1963 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1))
1964#else
1965# define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
1966 (a_pu32Mem) = iemMemFlatMapDataU32RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1967#endif
1968
1969/**
1970 * Maps guest memory for dword writeonly direct (or bounce) buffer access, flat
1971 * address variant.
1972 *
1973 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1974 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1975 * @param[in] a_GCPtrMem The memory address.
1976 * @remarks Will return/long jump on errors.
1977 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1978 */
1979#ifndef IEM_WITH_SETJMP
1980# define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
1981 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \
1982 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1))
1983#else
1984# define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
1985 (a_pu32Mem) = iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1986#endif
1987
1988/**
1989 * Maps guest memory for dword readonly direct (or bounce) buffer access, flat
1990 * address variant.
1991 *
1992 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1993 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1994 * @param[in] a_GCPtrMem The memory address.
1995 * @remarks Will return/long jump on errors.
1996 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1997 */
1998#ifndef IEM_WITH_SETJMP
1999# define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
2000 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \
2001 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1))
2002#else
2003# define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
2004 (a_pu32Mem) = iemMemFlatMapDataU32RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2005#endif
2006
2007/** int32_t alias. */
2008#ifndef IEM_WITH_SETJMP
2009# define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2010 IEM_MC_MEM_MAP_U32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
2011#else
2012# define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2013 (a_pi32Mem) = (int32_t *)iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2014#endif
2015
2016/** Flat int32_t alias. */
2017#ifndef IEM_WITH_SETJMP
2018# define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \
2019 IEM_MC_MEM_FLAT_MAP_U32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem)
2020#else
2021# define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \
2022 (a_pi32Mem) = (int32_t *)iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2023#endif
2024
2025/** RTFLOAT32U alias. */
2026#ifndef IEM_WITH_SETJMP
2027# define IEM_MC_MEM_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2028 IEM_MC_MEM_MAP_U32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
2029#else
2030# define IEM_MC_MEM_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2031 (a_pr32Mem) = (PRTFLOAT32U)iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2032#endif
2033
2034/** Flat RTFLOAT32U alias. */
2035#ifndef IEM_WITH_SETJMP
2036# define IEM_MC_MEM_FLAT_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) \
2037 IEM_MC_MEM_FLAT_MAP_U32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem)
2038#else
2039# define IEM_MC_MEM_FLAT_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) \
2040 (a_pr32Mem) = (PRTFLOAT32U)iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2041#endif
2042
2043
2044/* 64-bit */
2045
2046/**
2047 * Maps guest memory for qword atomic read+write direct (or bounce) buffer access.
2048 *
2049 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2050 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2051 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2052 * @param[in] a_GCPtrMem The memory address.
2053 * @remarks Will return/long jump on errors.
2054 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
2055 */
2056#ifndef IEM_WITH_SETJMP
2057# define IEM_MC_MEM_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2058 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \
2059 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint64_t) - 1))
2060#else
2061# define IEM_MC_MEM_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2062 (a_pu64Mem) = iemMemMapDataU64AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2063#endif
2064
2065/**
2066 * Maps guest memory for qword read+write direct (or bounce) buffer access.
2067 *
2068 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2069 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2070 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2071 * @param[in] a_GCPtrMem The memory address.
2072 * @remarks Will return/long jump on errors.
2073 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
2074 */
2075#ifndef IEM_WITH_SETJMP
2076# define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2077 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \
2078 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1))
2079#else
2080# define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2081 (a_pu64Mem) = iemMemMapDataU64RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2082#endif
2083
2084/**
2085 * Maps guest memory for qword writeonly direct (or bounce) buffer access.
2086 *
2087 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2088 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2089 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2090 * @param[in] a_GCPtrMem The memory address.
2091 * @remarks Will return/long jump on errors.
2092 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2093 */
2094#ifndef IEM_WITH_SETJMP
2095# define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2096 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \
2097 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
2098#else
2099# define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2100 (a_pu64Mem) = iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2101#endif
2102
2103/**
2104 * Maps guest memory for qword readonly direct (or bounce) buffer access.
2105 *
2106 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2107 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2108 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2109 * @param[in] a_GCPtrMem The memory address.
2110 * @remarks Will return/long jump on errors.
2111 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
2112 */
2113#ifndef IEM_WITH_SETJMP
2114# define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2115 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \
2116 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1))
2117#else
2118# define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2119 (a_pu64Mem) = iemMemMapDataU64RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2120#endif
2121
2122/**
2123 * Maps guest memory for qword atomic read+write direct (or bounce) buffer
2124 * access, flat address variant.
2125 *
2126 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2127 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2128 * @param[in] a_GCPtrMem The memory address.
2129 * @remarks Will return/long jump on errors.
2130 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
2131 */
2132#ifndef IEM_WITH_SETJMP
2133# define IEM_MC_MEM_FLAT_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2134 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \
2135 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint64_t) - 1))
2136#else
2137# define IEM_MC_MEM_FLAT_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2138 (a_pu64Mem) = iemMemFlatMapDataU64AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2139#endif
2140
2141/**
2142 * Maps guest memory for qword read+write direct (or bounce) buffer access,
2143 * flat address variant.
2144 *
2145 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2146 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2147 * @param[in] a_GCPtrMem The memory address.
2148 * @remarks Will return/long jump on errors.
2149 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
2150 */
2151#ifndef IEM_WITH_SETJMP
2152# define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2153 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \
2154 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1))
2155#else
2156# define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2157 (a_pu64Mem) = iemMemFlatMapDataU64RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2158#endif
2159
2160/**
2161 * Maps guest memory for qword writeonly direct (or bounce) buffer access, flat
2162 * address variant.
2163 *
2164 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2165 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2166 * @param[in] a_GCPtrMem The memory address.
2167 * @remarks Will return/long jump on errors.
2168 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2169 */
2170#ifndef IEM_WITH_SETJMP
2171# define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2172 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \
2173 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
2174#else
2175# define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2176 (a_pu64Mem) = iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2177#endif
2178
2179/**
2180 * Maps guest memory for qword readonly direct (or bounce) buffer access, flat
2181 * address variant.
2182 *
2183 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
2184 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2185 * @param[in] a_GCPtrMem The memory address.
2186 * @remarks Will return/long jump on errors.
2187 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
2188 */
2189#ifndef IEM_WITH_SETJMP
2190# define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2191 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \
2192 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1))
2193#else
2194# define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
2195 (a_pu64Mem) = iemMemFlatMapDataU64RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2196#endif
2197
2198/** int64_t alias. */
2199#ifndef IEM_WITH_SETJMP
2200# define IEM_MC_MEM_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2201 IEM_MC_MEM_MAP_U64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
2202#else
2203# define IEM_MC_MEM_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2204 (a_pi64Mem) = (int64_t *)iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2205#endif
2206
2207/** Flat int64_t alias. */
2208#ifndef IEM_WITH_SETJMP
2209# define IEM_MC_MEM_FLAT_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem) \
2210 IEM_MC_MEM_FLAT_MAP_U64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem)
2211#else
2212# define IEM_MC_MEM_FLAT_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem) \
2213 (a_pi64Mem) = (int64_t *)iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2214#endif
2215
2216/** RTFLOAT64U alias. */
2217#ifndef IEM_WITH_SETJMP
2218# define IEM_MC_MEM_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2219 IEM_MC_MEM_MAP_U64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
2220#else
2221# define IEM_MC_MEM_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2222 (a_pr64Mem) = (PRTFLOAT64U)iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2223#endif
2224
2225/** Flat RTFLOAT64U alias. */
2226#ifndef IEM_WITH_SETJMP
2227# define IEM_MC_MEM_FLAT_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_GCPtrMem) \
2228 IEM_MC_MEM_FLAT_MAP_U64_WO(a_pr64Mem, a_bUnmapInfo, a_GCPtrMem)
2229#else
2230# define IEM_MC_MEM_FLAT_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_GCPtrMem) \
2231 (a_pr64Mem) = (PRTFLOAT64U)iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2232#endif
2233
2234
2235/* 128-bit */
2236
2237/**
2238 * Maps guest memory for dqword atomic read+write direct (or bounce) buffer access.
2239 *
2240 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2241 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2242 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2243 * @param[in] a_GCPtrMem The memory address.
2244 * @remarks Will return/long jump on errors.
2245 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
2246 */
2247#ifndef IEM_WITH_SETJMP
2248# define IEM_MC_MEM_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2249 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), (a_iSeg), \
2250 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(RTUINT128U) - 1))
2251#else
2252# define IEM_MC_MEM_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2253 (a_pu128Mem) = iemMemMapDataU128AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2254#endif
2255
2256/**
2257 * Maps guest memory for dqword read+write direct (or bounce) buffer access.
2258 *
2259 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2260 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2261 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2262 * @param[in] a_GCPtrMem The memory address.
2263 * @remarks Will return/long jump on errors.
2264 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
2265 */
2266#ifndef IEM_WITH_SETJMP
2267# define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2268 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), (a_iSeg), \
2269 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(RTUINT128U) - 1))
2270#else
2271# define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2272 (a_pu128Mem) = iemMemMapDataU128RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2273#endif
2274
2275/**
2276 * Maps guest memory for dqword writeonly direct (or bounce) buffer access.
2277 *
2278 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2279 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2280 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2281 * @param[in] a_GCPtrMem The memory address.
2282 * @remarks Will return/long jump on errors.
2283 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2284 */
2285#ifndef IEM_WITH_SETJMP
2286# define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2287 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), (a_iSeg), \
2288 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(RTUINT128U) - 1))
2289#else
2290# define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2291 (a_pu128Mem) = iemMemMapDataU128WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2292#endif
2293
2294/**
2295 * Maps guest memory for dqword readonly direct (or bounce) buffer access.
2296 *
2297 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2298 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2299 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2300 * @param[in] a_GCPtrMem The memory address.
2301 * @remarks Will return/long jump on errors.
2302 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
2303 */
2304#ifndef IEM_WITH_SETJMP
2305# define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2306 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), (a_iSeg), \
2307 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(RTUINT128U) - 1))
2308#else
2309# define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2310 (a_pu128Mem) = iemMemMapDataU128RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2311#endif
2312
2313/**
2314 * Maps guest memory for dqword atomic read+write direct (or bounce) buffer
2315 * access, flat address variant.
2316 *
2317 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2318 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2319 * @param[in] a_GCPtrMem The memory address.
2320 * @remarks Will return/long jump on errors.
2321 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
2322 */
2323#ifndef IEM_WITH_SETJMP
2324# define IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2325 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), UINT8_MAX, \
2326 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(RTUINT128U) - 1))
2327#else
2328# define IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2329 (a_pu128Mem) = iemMemFlatMapDataU128AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2330#endif
2331
2332/**
2333 * Maps guest memory for dqword read+write direct (or bounce) buffer access,
2334 * flat address variant.
2335 *
2336 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2337 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2338 * @param[in] a_GCPtrMem The memory address.
2339 * @remarks Will return/long jump on errors.
2340 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
2341 */
2342#ifndef IEM_WITH_SETJMP
2343# define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2344 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), UINT8_MAX, \
2345 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(RTUINT128U) - 1))
2346#else
2347# define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2348 (a_pu128Mem) = iemMemFlatMapDataU128RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2349#endif
2350
2351/**
2352 * Maps guest memory for dqword writeonly direct (or bounce) buffer access,
2353 * flat address variant.
2354 *
2355 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2356 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2357 * @param[in] a_GCPtrMem The memory address.
2358 * @remarks Will return/long jump on errors.
2359 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2360 */
2361#ifndef IEM_WITH_SETJMP
2362# define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2363 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), UINT8_MAX, \
2364 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(RTUINT128U) - 1))
2365#else
2366# define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2367 (a_pu128Mem) = iemMemFlatMapDataU128WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2368#endif
2369
2370/**
2371 * Maps guest memory for dqword readonly direct (or bounce) buffer access, flat
2372 * address variant.
2373 *
2374 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
2375 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2376 * @param[in] a_GCPtrMem The memory address.
2377 * @remarks Will return/long jump on errors.
2378 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
2379 */
2380#ifndef IEM_WITH_SETJMP
2381# define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2382 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), UINT8_MAX, \
2383 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(RTUINT128U) - 1))
2384#else
2385# define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
2386 (a_pu128Mem) = iemMemFlatMapDataU128RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2387#endif
2388
2389
2390/* misc */
2391
2392/**
2393 * Maps guest memory for 80-bit float writeonly direct (or bounce) buffer access.
2394 *
2395 * @param[out] a_pr80Mem Where to return the pointer to the mapping.
2396 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2397 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2398 * @param[in] a_GCPtrMem The memory address.
2399 * @remarks Will return/long jump on errors.
2400 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2401 */
2402#ifndef IEM_WITH_SETJMP
2403# define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2404 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), (a_iSeg), \
2405 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
2406#else
2407# define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2408 (a_pr80Mem) = iemMemMapDataR80WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2409#endif
2410
2411/**
2412 * Maps guest memory for 80-bit float writeonly direct (or bounce) buffer access, flat address variant.
2413 *
2414 * @param[out] a_pr80Mem Where to return the pointer to the mapping.
2415 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2416 * @param[in] a_GCPtrMem The memory address.
2417 * @remarks Will return/long jump on errors.
2418 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2419 */
2420#ifndef IEM_WITH_SETJMP
2421# define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \
2422 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), UINT8_MAX, \
2423 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
2424#else
2425# define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \
2426 (a_pr80Mem) = iemMemFlatMapDataR80WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2427#endif
2428
2429
2430/**
2431 * Maps guest memory for 80-bit BCD writeonly direct (or bounce) buffer access.
2432 *
2433 * @param[out] a_pd80Mem Where to return the pointer to the mapping.
2434 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2435 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
2436 * @param[in] a_GCPtrMem The memory address.
2437 * @remarks Will return/long jump on errors.
2438 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2439 */
2440#ifndef IEM_WITH_SETJMP
2441# define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2442 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pd80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), (a_iSeg), \
2443 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
2444#else
2445# define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2446 (a_pd80Mem) = iemMemMapDataD80WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2447#endif
2448
2449/**
2450 * Maps guest memory for 80-bit BCD writeonly direct (or bounce) buffer access, flat address variant.
2451 *
2452 * @param[out] a_pd80Mem Where to return the pointer to the mapping.
2453 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2454 * @param[in] a_GCPtrMem The memory address.
2455 * @remarks Will return/long jump on errors.
2456 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2457 */
2458#ifndef IEM_WITH_SETJMP
2459# define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \
2460 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pd80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), UINT8_MAX, \
2461 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
2462#else
2463# define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \
2464 (a_pd80Mem) = iemMemFlatMapDataD80WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2465#endif
2466
2467
2468
2469/* commit + unmap */
2470
2471/** Commits the memory and unmaps guest memory previously mapped RW.
2472 * @remarks May return.
2473 * @note Implicitly frees the a_bMapInfo variable.
2474 */
2475#ifndef IEM_WITH_SETJMP
2476# define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_bMapInfo) IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo))
2477#else
2478# define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_bMapInfo) iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo))
2479#endif
2480
2481/** Commits the memory and unmaps guest memory previously mapped ATOMIC.
2482 * @remarks May return.
2483 * @note Implicitly frees the a_bMapInfo variable.
2484 */
2485#ifndef IEM_WITH_SETJMP
2486# define IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(a_bMapInfo) IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo))
2487#else
2488# define IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(a_bMapInfo) iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo))
2489#endif
2490
2491/** Commits the memory and unmaps guest memory previously mapped W.
2492 * @remarks May return.
2493 * @note Implicitly frees the a_bMapInfo variable.
2494 */
2495#ifndef IEM_WITH_SETJMP
2496# define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_bMapInfo) IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo))
2497#else
2498# define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_bMapInfo) iemMemCommitAndUnmapWoJmp(pVCpu, (a_bMapInfo))
2499#endif
2500
2501/** Commits the memory and unmaps guest memory previously mapped R.
2502 * @remarks May return.
2503 * @note Implicitly frees the a_bMapInfo variable.
2504 */
2505#ifndef IEM_WITH_SETJMP
2506# define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_bMapInfo) IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo))
2507#else
2508# define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_bMapInfo) iemMemCommitAndUnmapRoJmp(pVCpu, (a_bMapInfo))
2509#endif
2510
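/*
 * Illustrative sketch (not from the original file): how a MAP macro is meant to
 * be paired with the matching COMMIT_AND_UNMAP macro in a read-modify-write
 * sequence.  The locals (bUnmapInfo, pu32Dst, u32Src, iEffSeg, GCPtrEffDst) are
 * placeholder names; real MC blocks typically declare them via the
 * IEM_MC_LOCAL / IEM_MC_ARG machinery earlier in this header.
 *
 *      uint8_t   bUnmapInfo;
 *      uint32_t *pu32Dst;
 *      IEM_MC_MEM_MAP_U32_RW(pu32Dst, bUnmapInfo, iEffSeg, GCPtrEffDst);
 *      *pu32Dst |= u32Src;
 *      IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo);
 *
 * The unmap info byte filled in by the MAP macro is consumed by exactly one of
 * the COMMIT_AND_UNMAP / ROLLBACK_AND_UNMAP macros.
 */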
2511
2512/** Commits the memory and unmaps the guest memory unless the FPU status word
2513 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
2514 * would prevent the instruction from storing its result.
2515 *
2516 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
2517 * store, while \#P will not.
2518 *
2519 * @remarks May in theory return - for now.
2520 * @note Implicitly frees both the a_bMapInfo and a_u16FSW variables.
2521 */
2522#ifndef IEM_WITH_SETJMP
2523# define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_bMapInfo, a_u16FSW) do { \
2524 if ( !(a_u16FSW & X86_FSW_ES) \
2525 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
2526 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
2527 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo)); \
2528 else \
2529 iemMemRollbackAndUnmap(pVCpu, a_bMapInfo); \
2530 } while (0)
2531#else
2532# define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_bMapInfo, a_u16FSW) do { \
2533 if ( !(a_u16FSW & X86_FSW_ES) \
2534 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
2535 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
2536 iemMemCommitAndUnmapWoJmp(pVCpu, a_bMapInfo); \
2537 else \
2538 iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo); \
2539 } while (0)
2540#endif
2541
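/*
 * Illustrative sketch (placeholder names, not from the original file): the
 * FOR_FPU_STORE_WO variant exists for FST/FIST-style stores where the assembly
 * worker computes both the value and a tentative FSW, and the commit must be
 * skipped when an unmasked exception is pending:
 *
 *      IEM_MC_MEM_MAP_R32_WO(pr32Dst, bUnmapInfo, iEffSeg, GCPtrEffDst);
 *      IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, &u16FSW, pr32Dst, pr80Value);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(bUnmapInfo, u16FSW);
 */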
2542/** Rolls back (conceptually only, assumes no writes) and unmaps the guest memory.
2543 * @note Implicitly frees the a_bMapInfo variable. */
2544#ifndef IEM_WITH_SETJMP
2545# define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_bMapInfo) iemMemRollbackAndUnmap(pVCpu, a_bMapInfo)
2546#else
2547# define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_bMapInfo) iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo)
2548#endif
2549
2550
2551
2552/** Calculate effective address from R/M. */
2553#ifndef IEM_WITH_SETJMP
2554# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
2555 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &(a_GCPtrEff)))
2556#else
2557# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
2558 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (a_bRm), (a_cbImmAndRspOffset)))
2559#endif
2560
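/*
 * Illustrative sketch (placeholder locals): the effective address calculation
 * is typically the first MC step of a memory-operand instruction; the third
 * parameter is the a_cbImmAndRspOffset value, passed as 0 here on the
 * assumption that no immediate bytes follow the ModR/M encoding.
 *
 *      RTGCPTR GCPtrEffSrc;
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEM_MC_MEM_MAP_U32_RO(pu32Src, bUnmapInfo, iEffSeg, GCPtrEffSrc);
 */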
2561
2562/** The @a a_fSupportedHosts mask is a set of ORed together RT_ARCH_VAL_XXX values. */
2563#define IEM_MC_NATIVE_IF(a_fSupportedHosts) if (false) {
2564#define IEM_MC_NATIVE_ELSE() } else {
2565#define IEM_MC_NATIVE_ENDIF() } ((void)0)
2566
2567#define IEM_MC_NATIVE_EMIT_0(a_fnEmitter)
2568#define IEM_MC_NATIVE_EMIT_1(a_fnEmitter, a0) (void)(a0)
2569#define IEM_MC_NATIVE_EMIT_2(a_fnEmitter, a0, a1) (void)(a0), (void)(a1)
2570#define IEM_MC_NATIVE_EMIT_3(a_fnEmitter, a0, a1, a2) (void)(a0), (void)(a1), (void)(a2)
2571#define IEM_MC_NATIVE_EMIT_4(a_fnEmitter, a0, a1, a2, a3) (void)(a0), (void)(a1), (void)(a2), (void)(a3)
2572#define IEM_MC_NATIVE_EMIT_5(a_fnEmitter, a0, a1, a2, a3, a4) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4)
2573#define IEM_MC_NATIVE_EMIT_6(a_fnEmitter, a0, a1, a2, a3, a4, a5) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5)
2574#define IEM_MC_NATIVE_EMIT_7(a_fnEmitter, a0, a1, a2, a3, a4, a5, a6) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5), (void)(a6)
2575#define IEM_MC_NATIVE_EMIT_8(a_fnEmitter, a0, a1, a2, a3, a4, a5, a6, a7) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5), (void)(a6), (void)(a7)
2576
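/*
 * Illustrative sketch: in this interpreter header the NATIVE_* macros are
 * (mostly) no-ops; the native recompiler provides the real implementations.
 * The intended usage shape is roughly the following, where
 * iemNativeEmitSketch_AddU32 is a made-up emitter name and the fallback path
 * reuses the regular assembly helper:
 *
 *      IEM_MC_NATIVE_IF(RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64)
 *          IEM_MC_NATIVE_EMIT_2(iemNativeEmitSketch_AddU32, u32Dst, u32Src);
 *      IEM_MC_NATIVE_ELSE()
 *          IEM_MC_CALL_VOID_AIMPL_2(pfnAImpl, pu32Dst, u32Src);
 *      IEM_MC_NATIVE_ENDIF();
 */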
2577
2578#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
2579#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
2580#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
2581#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
2582#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
2583#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
2584#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
2585
2586
2587/** @def IEM_MC_CALL_CIMPL_HLP_RET
2588 * Helper macro for checking that all important IEM_CIMPL_F_XXX bits are set.
2589 */
2590#ifdef VBOX_STRICT
2591#define IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, a_CallExpr) \
2592 do { \
2593 uint8_t const cbInstr = IEM_GET_INSTR_LEN(pVCpu); /* may be flushed */ \
2594 uint16_t const uCsBefore = pVCpu->cpum.GstCtx.cs.Sel; \
2595 uint64_t const uRipBefore = pVCpu->cpum.GstCtx.rip; \
2596 uint32_t const fEflBefore = pVCpu->cpum.GstCtx.eflags.u; \
2597 uint32_t const fExecBefore = pVCpu->iem.s.fExec; \
2598 VBOXSTRICTRC const rcStrictHlp = a_CallExpr; \
2599 if (rcStrictHlp == VINF_SUCCESS) \
2600 { \
2601 AssertMsg( ((a_fFlags) & IEM_CIMPL_F_BRANCH_ANY) \
2602 || ( uRipBefore + cbInstr == pVCpu->cpum.GstCtx.rip \
2603 && uCsBefore == pVCpu->cpum.GstCtx.cs.Sel) \
2604 || ( ((a_fFlags) & IEM_CIMPL_F_REP) \
2605 && uRipBefore == pVCpu->cpum.GstCtx.rip \
2606 && uCsBefore == pVCpu->cpum.GstCtx.cs.Sel), \
2607 ("CS:RIP=%04x:%08RX64 + %x -> %04x:%08RX64, expected %04x:%08RX64\n", uCsBefore, uRipBefore, cbInstr, \
2608 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uCsBefore, uRipBefore + cbInstr)); \
2609 if ((a_fFlags) & IEM_CIMPL_F_RFLAGS) \
2610 { /* No need to check fEflBefore */ Assert(!((a_fFlags) & IEM_CIMPL_F_STATUS_FLAGS)); } \
2611 else if ((a_fFlags) & IEM_CIMPL_F_STATUS_FLAGS) \
2612 AssertMsg( (pVCpu->cpum.GstCtx.eflags.u & ~(X86_EFL_STATUS_BITS | X86_EFL_RF)) \
2613 == (fEflBefore & ~(X86_EFL_STATUS_BITS | X86_EFL_RF)), \
2614 ("EFL=%#RX32 -> %#RX32\n", fEflBefore, pVCpu->cpum.GstCtx.eflags.u)); \
2615 else \
2616 AssertMsg( (pVCpu->cpum.GstCtx.eflags.u & ~(X86_EFL_RF)) \
2617 == (fEflBefore & ~(X86_EFL_RF)), \
2618 ("EFL=%#RX32 -> %#RX32\n", fEflBefore, pVCpu->cpum.GstCtx.eflags.u)); \
2619 if (!((a_fFlags) & IEM_CIMPL_F_MODE)) \
2620 { \
2621 uint32_t fExecRecalc = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS); \
2622 AssertMsg( fExecBefore == fExecRecalc \
2623 /* in case ES, DS or SS was external initially (happens a lot with HM): */ \
2624 || ( fExecBefore == (fExecRecalc & ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK) \
2625 && (fExecRecalc & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT), \
2626 ("fExec=%#x -> %#x (diff %#x)\n", fExecBefore, fExecRecalc, fExecBefore ^ fExecRecalc)); \
2627 } \
2628 } \
2629 return rcStrictHlp; \
2630 } while (0)
2631#else
2632# define IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, a_CallExpr) return a_CallExpr
2633#endif
2634
2635/**
2636 * Defers the rest of the instruction emulation to a C implementation routine
2637 * and returns, only taking the standard parameters.
2638 *
2639 * @param a_fFlags IEM_CIMPL_F_XXX.
2640 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2641 * in the native recompiler.
2642 * @param a_pfnCImpl The pointer to the C routine.
2643 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
2644 */
2645#define IEM_MC_CALL_CIMPL_0(a_fFlags, a_fGstShwFlush, a_pfnCImpl) \
2646 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu)))
2647
2648/**
2649 * Defers the rest of instruction emulation to a C implementation routine and
2650 * returns, taking one argument in addition to the standard ones.
2651 *
2652 * @param a_fFlags IEM_CIMPL_F_XXX.
2653 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2654 * in the native recompiler.
2655 * @param a_pfnCImpl The pointer to the C routine.
2656 * @param a0 The argument.
2657 */
2658#define IEM_MC_CALL_CIMPL_1(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
2659 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0))
2660
2661/**
2662 * Defers the rest of the instruction emulation to a C implementation routine
2663 * and returns, taking two arguments in addition to the standard ones.
2664 *
2665 * @param a_fFlags IEM_CIMPL_F_XXX.
2666 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2667 * in the native recompiler.
2668 * @param a_pfnCImpl The pointer to the C routine.
2669 * @param a0 The first extra argument.
2670 * @param a1 The second extra argument.
2671 */
2672#define IEM_MC_CALL_CIMPL_2(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
2673 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1))
2674
2675/**
2676 * Defers the rest of the instruction emulation to a C implementation routine
2677 * and returns, taking three arguments in addition to the standard ones.
2678 *
2679 * @param a_fFlags IEM_CIMPL_F_XXX.
2680 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2681 * in the native recompiler.
2682 * @param a_pfnCImpl The pointer to the C routine.
2683 * @param a0 The first extra argument.
2684 * @param a1 The second extra argument.
2685 * @param a2 The third extra argument.
2686 */
2687#define IEM_MC_CALL_CIMPL_3(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
2688 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2))
2689
2690/**
2691 * Defers the rest of the instruction emulation to a C implementation routine
2692 * and returns, taking four arguments in addition to the standard ones.
2693 *
2694 * @param a_fFlags IEM_CIMPL_F_XXX.
2695 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2696 * in the native recompiler.
2697 * @param a_pfnCImpl The pointer to the C routine.
2698 * @param a0 The first extra argument.
2699 * @param a1 The second extra argument.
2700 * @param a2 The third extra argument.
2701 * @param a3 The fourth extra argument.
2702 */
2703#define IEM_MC_CALL_CIMPL_4(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3) \
2704 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3))
2705
2706/**
2707 * Defers the rest of the instruction emulation to a C implementation routine
2708 * and returns, taking five arguments in addition to the standard ones.
2709 *
2710 * @param a_fFlags IEM_CIMPL_F_XXX.
2711 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2712 * in the native recompiler.
2713 * @param a_pfnCImpl The pointer to the C routine.
2714 * @param a0 The first extra argument.
2715 * @param a1 The second extra argument.
2716 * @param a2 The third extra argument.
2717 * @param a3 The fourth extra argument.
2718 * @param a4 The fifth extra argument.
2719 */
2720#define IEM_MC_CALL_CIMPL_5(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3, a4) \
2721 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4))
2722
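/*
 * Illustrative sketch (iemCImpl_SketchWorker is a made-up name): a decoder body
 * hands the rest of a complex instruction to a C worker like this, where the
 * flag set is whatever IEM_CIMPL_F_XXX bits describe the worker's side effects
 * and a_fGstShwFlush is only of interest to the native recompiler (the
 * interpreter expansions above ignore it):
 *
 *      IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_STATUS_FLAGS, 0, iemCImpl_SketchWorker, u16Arg);
 */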
2723/**
2724 * Defers the entire instruction emulation to a C implementation routine and
2725 * returns, only taking the standard parameters.
2726 *
2727 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
2728 *
2729 * @param a_fFlags IEM_CIMPL_F_XXX.
2730 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2731 * in the native recompiler.
2732 * @param a_pfnCImpl The pointer to the C routine.
2733 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
2734 */
2735#define IEM_MC_DEFER_TO_CIMPL_0_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl) \
2736 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu)))
2737
2738/**
2739 * Defers the entire instruction emulation to a C implementation routine and
2740 * returns, taking one argument in addition to the standard ones.
2741 *
2742 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
2743 *
2744 * @param a_fFlags IEM_CIMPL_F_XXX.
2745 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2746 * in the native recompiler.
2747 * @param a_pfnCImpl The pointer to the C routine.
2748 * @param a0 The argument.
2749 */
2750#define IEM_MC_DEFER_TO_CIMPL_1_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
2751 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0))
2752
2753/**
2754 * Defers the entire instruction emulation to a C implementation routine and
2755 * returns, taking two arguments in addition to the standard ones.
2756 *
2757 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
2758 *
2759 * @param a_fFlags IEM_CIMPL_F_XXX.
2760 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2761 * in the native recompiler.
2762 * @param a_pfnCImpl The pointer to the C routine.
2763 * @param a0 The first extra argument.
2764 * @param a1 The second extra argument.
2765 */
2766#define IEM_MC_DEFER_TO_CIMPL_2_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
2767 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1))
2768
2769/**
2770 * Defers the entire instruction emulation to a C implementation routine and
2771 * returns, taking three arguments in addition to the standard ones.
2772 *
2773 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
2774 *
2775 * @param a_fFlags IEM_CIMPL_F_XXX.
2776 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2777 * in the native recompiler.
2778 * @param a_pfnCImpl The pointer to the C routine.
2779 * @param a0 The first extra argument.
2780 * @param a1 The second extra argument.
2781 * @param a2 The third extra argument.
2782 */
2783#define IEM_MC_DEFER_TO_CIMPL_3_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
2784 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2))
2785
2786
2787/**
2788 * Calls a FPU assembly implementation taking one visible argument.
2789 *
2790 * @param a_pfnAImpl Pointer to the assembly FPU routine.
2791 * @param a0 The first extra argument.
2792 */
2793#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
2794 do { \
2795 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0)); \
2796 } while (0)
2797
2798/**
2799 * Calls a FPU assembly implementation taking two visible arguments.
2800 *
2801 * @param a_pfnAImpl Pointer to the assembly FPU routine.
2802 * @param a0 The first extra argument.
2803 * @param a1 The second extra argument.
2804 */
2805#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
2806 do { \
2807 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
2808 } while (0)
2809
2810/**
2811 * Calls a FPU assembly implementation taking three visible arguments.
2812 *
2813 * @param a_pfnAImpl Pointer to the assembly FPU routine.
2814 * @param a0 The first extra argument.
2815 * @param a1 The second extra argument.
2816 * @param a2 The third extra argument.
2817 */
2818#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
2819 do { \
2820 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
2821 } while (0)
2822
2823#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
2824 do { \
2825 (a_FpuData).FSW = (a_FSW); \
2826 (a_FpuData).r80Result = *(a_pr80Value); \
2827 } while (0)
2828
2829/** Pushes FPU result onto the stack. */
2830#define IEM_MC_PUSH_FPU_RESULT(a_FpuData, a_uFpuOpcode) \
2831 iemFpuPushResult(pVCpu, &a_FpuData, a_uFpuOpcode)
2832/** Pushes FPU result onto the stack and sets the FPUDP. */
2833#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2834 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2835
2836/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
2837#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo, a_uFpuOpcode) \
2838 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo, a_uFpuOpcode)
2839
2840/** Stores FPU result in a stack register. */
2841#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg, a_uFpuOpcode) \
2842 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg, a_uFpuOpcode)
2843/** Stores FPU result in a stack register and pops the stack. */
2844#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg, a_uFpuOpcode) \
2845 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg, a_uFpuOpcode)
2846/** Stores FPU result in a stack register and sets the FPUDP. */
2847#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2848 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2849/** Stores FPU result in a stack register, sets the FPUDP, and pops the
2850 * stack. */
2851#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2852 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2853
2854/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
2855#define IEM_MC_UPDATE_FPU_OPCODE_IP(a_uFpuOpcode) \
2856 iemFpuUpdateOpcodeAndIp(pVCpu, a_uFpuOpcode)
2857/** Free a stack register (for FFREE and FFREEP). */
2858#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
2859 iemFpuStackFree(pVCpu, a_iStReg)
2860/** Increment the FPU stack pointer. */
2861#define IEM_MC_FPU_STACK_INC_TOP() \
2862 iemFpuStackIncTop(pVCpu)
2863/** Decrement the FPU stack pointer. */
2864#define IEM_MC_FPU_STACK_DEC_TOP() \
2865 iemFpuStackDecTop(pVCpu)
2866
2867/** Updates the FSW, FOP, FPUIP, and FPUCS. */
2868#define IEM_MC_UPDATE_FSW(a_u16FSW, a_uFpuOpcode) \
2869 iemFpuUpdateFSW(pVCpu, a_u16FSW, a_uFpuOpcode)
2870/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
2871#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW, a_uFpuOpcode) \
2872 iemFpuUpdateFSW(pVCpu, a_u16FSW, a_uFpuOpcode)
2873/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
2874#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2875 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2876/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
2877#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW, a_uFpuOpcode) \
2878 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW, a_uFpuOpcode)
2879/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
2880 * stack. */
2881#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2882 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2883/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
2884#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW, a_uFpuOpcode) \
2885 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW, a_uFpuOpcode)
2886
2887/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
2888#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst, a_uFpuOpcode) \
2889 iemFpuStackUnderflow(pVCpu, a_iStDst, a_uFpuOpcode)
2890/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
2891 * stack. */
2892#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst, a_uFpuOpcode) \
2893 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst, a_uFpuOpcode)
2894/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
2895 * FPUDS. */
2896#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2897 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2898/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
2899 * FPUDS. Pops stack. */
2900#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2901 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2902/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
2903 * stack twice. */
2904#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP(a_uFpuOpcode) \
2905 iemFpuStackUnderflowThenPopPop(pVCpu, a_uFpuOpcode)
2906/** Raises a FPU stack underflow exception for an instruction pushing a result
2907 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
2908#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW(a_uFpuOpcode) \
2909 iemFpuStackPushUnderflow(pVCpu, a_uFpuOpcode)
2910/** Raises a FPU stack underflow exception for an instruction pushing a result
2911 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
2912#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO(a_uFpuOpcode) \
2913 iemFpuStackPushUnderflowTwo(pVCpu, a_uFpuOpcode)
2914
2915/** Raises a FPU stack overflow exception as part of a push attempt. Sets
2916 * FPUIP, FPUCS and FOP. */
2917#define IEM_MC_FPU_STACK_PUSH_OVERFLOW(a_uFpuOpcode) \
2918 iemFpuStackPushOverflow(pVCpu, a_uFpuOpcode)
2919/** Raises a FPU stack overflow exception as part of a push attempt. Sets
2920 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
2921#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2922 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2923/** Prepares for using the FPU state.
2924 * Ensures that we can use the host FPU in the current context (RC+R0).
2925 * Ensures the guest FPU state in the CPUMCTX is up to date. */
2926#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
2927/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
2928#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
2929/** Actualizes the guest FPU state so it can be accessed and modified. */
2930#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
2931
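/*
 * Illustrative sketch (placeholder locals and worker name): how the FPU
 * preparation, register-reference, worker-call and result-store macros combine
 * for a two-operand ST0,STi arithmetic instruction:
 *
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *          IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, &FpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0, uFpuOpcode);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0, uFpuOpcode);
 *      IEM_MC_ENDIF();
 */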
2932/** Stores SSE SIMD result updating MXCSR. */
2933#define IEM_MC_STORE_SSE_RESULT(a_SseData, a_iXmmReg) \
2934 iemSseStoreResult(pVCpu, &a_SseData, a_iXmmReg)
2935/** Updates MXCSR. */
2936#define IEM_MC_SSE_UPDATE_MXCSR(a_fMxcsr) \
2937 iemSseUpdateMxcsr(pVCpu, a_fMxcsr)
2938
2939/** Prepares for using the SSE state.
2940 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
2941 * Ensures the guest SSE state in the CPUMCTX is up to date. */
2942#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
2943/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
2944#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
2945/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
2946#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
2947
2948/** Prepares for using the AVX state.
2949 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
2950 * Ensures the guest AVX state in the CPUMCTX is up to date.
2951 * @note This will include the AVX512 state too when support for it is added
2952 * due to the zero-extending feature of VEX instructions. */
2953#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
2954/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
2955#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
2956/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
2957#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
2958
2959/**
2960 * Calls a MMX assembly implementation taking two visible arguments.
2961 *
2962 * @param a_pfnAImpl Pointer to the assembly MMX routine.
2963 * @param a0 The first extra argument.
2964 * @param a1 The second extra argument.
2965 */
2966#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
2967 do { \
2968 IEM_MC_PREPARE_FPU_USAGE(); \
2969 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
2970 } while (0)
2971
2972/**
2973 * Calls a MMX assembly implementation taking three visible arguments.
2974 *
2975 * @param a_pfnAImpl Pointer to the assembly MMX routine.
2976 * @param a0 The first extra argument.
2977 * @param a1 The second extra argument.
2978 * @param a2 The third extra argument.
2979 */
2980#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
2981 do { \
2982 IEM_MC_PREPARE_FPU_USAGE(); \
2983 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
2984 } while (0)
2985
2986
2987/**
2988 * Calls a SSE assembly implementation taking two visible arguments.
2989 *
2990 * @param a_pfnAImpl Pointer to the assembly SSE routine.
2991 * @param a0 The first extra argument.
2992 * @param a1 The second extra argument.
2993 */
2994#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
2995 do { \
2996 IEM_MC_PREPARE_SSE_USAGE(); \
2997 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
2998 } while (0)
2999
3000/**
3001 * Calls a SSE assembly implementation taking three visible arguments.
3002 *
3003 * @param a_pfnAImpl Pointer to the assembly SSE routine.
3004 * @param a0 The first extra argument.
3005 * @param a1 The second extra argument.
3006 * @param a2 The third extra argument.
3007 */
3008#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
3009 do { \
3010 IEM_MC_PREPARE_SSE_USAGE(); \
3011 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
3012 } while (0)
3013
3014
3015/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
3016 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ...
3017 * @note IEMAllInstPython.py duplicates the expansion. */
3018#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
3019 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, &pVCpu->cpum.GstCtx.XState, 0)
3020
3021/**
3022 * Calls a AVX assembly implementation taking two visible arguments.
3023 *
3024 * There is one implicit zeroth argument, a pointer to the extended state.
3025 *
3026 * @param a_pfnAImpl Pointer to the assembly AVX routine.
3027 * @param a1 The first extra argument.
3028 * @param a2 The second extra argument.
3029 */
3030#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
3031 do { \
3032 IEM_MC_PREPARE_AVX_USAGE(); \
3033 a_pfnAImpl(pXState, (a1), (a2)); \
3034 } while (0)
3035
3036/**
3037 * Calls a AVX assembly implementation taking three visible arguments.
3038 *
3039 * There is one implicit zeroth argument, a pointer to the extended state.
3040 *
3041 * @param a_pfnAImpl Pointer to the assembly AVX routine.
3042 * @param a1 The first extra argument.
3043 * @param a2 The second extra argument.
3044 * @param a3 The third extra argument.
3045 */
3046#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
3047 do { \
3048 IEM_MC_PREPARE_AVX_USAGE(); \
3049 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
3050 } while (0)
3051
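/*
 * Illustrative sketch (placeholder worker and register indices): the AVX call
 * macros assume IEM_MC_IMPLICIT_AVX_AIMPL_ARGS has already declared pXState in
 * the same MC block, so a typical pair looks like:
 *
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *      IEM_MC_CALL_AVX_AIMPL_2(pfnAImplU256, iYmmDst, iYmmSrc);
 */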
3052/** @note Not for IOPL or IF testing. */
3053#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
3054/** @note Not for IOPL or IF testing. */
3055#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
3056/** @note Not for IOPL or IF testing. */
3057#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
3058/** @note Not for IOPL or IF testing. */
3059#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
3060/** @note Not for IOPL or IF testing. */
3061#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
3062 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
3063 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
3064/** @note Not for IOPL or IF testing. */
3065#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
3066 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
3067 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
3068/** @note Not for IOPL or IF testing. */
3069#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
3070 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
3071 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
3072 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
3073/** @note Not for IOPL or IF testing. */
3074#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
3075 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
3076 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
3077 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
3078#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
3079#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
3080#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
3081#define IEM_MC_IF_CX_IS_NOT_ONE() if (pVCpu->cpum.GstCtx.cx != 1) {
3082#define IEM_MC_IF_ECX_IS_NOT_ONE() if (pVCpu->cpum.GstCtx.ecx != 1) {
3083#define IEM_MC_IF_RCX_IS_NOT_ONE() if (pVCpu->cpum.GstCtx.rcx != 1) {
3084/** @note Not for IOPL or IF testing. */
3085#define IEM_MC_IF_CX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
3086 if ( pVCpu->cpum.GstCtx.cx != 1 \
3087 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
3088/** @note Not for IOPL or IF testing. */
3089#define IEM_MC_IF_ECX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
3090 if ( pVCpu->cpum.GstCtx.ecx != 1 \
3091 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
3092/** @note Not for IOPL or IF testing. */
3093#define IEM_MC_IF_RCX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
3094 if ( pVCpu->cpum.GstCtx.rcx != 1 \
3095 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
3096/** @note Not for IOPL or IF testing. */
3097#define IEM_MC_IF_CX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
3098 if ( pVCpu->cpum.GstCtx.cx != 1 \
3099 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
3100/** @note Not for IOPL or IF testing. */
3101#define IEM_MC_IF_ECX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
3102 if ( pVCpu->cpum.GstCtx.ecx != 1 \
3103 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
3104/** @note Not for IOPL or IF testing. */
3105#define IEM_MC_IF_RCX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
3106 if ( pVCpu->cpum.GstCtx.rcx != 1 \
3107 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
3108#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
3109#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
3110
3111#define IEM_MC_REF_FPUREG(a_pr80Dst, a_iSt) \
3112 do { (a_pr80Dst) = &pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80; } while (0)
3113#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
3114 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
3115#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
3116 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
3117#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
3118 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
3119#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
3120 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
3121#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
3122 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
3123#define IEM_MC_IF_FCW_IM() \
3124 if (pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_IM) {
3125#define IEM_MC_IF_MXCSR_XCPT_PENDING() \
3126 if (( ~((pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
3127 & (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_FLAGS)) != 0) {
3128
3129#define IEM_MC_ELSE() } else {
3130#define IEM_MC_ENDIF() } do {} while (0)
3131
3132
3133/** Recompiler debugging: Flush guest register shadow copies. */
3134#define IEM_MC_HINT_FLUSH_GUEST_SHADOW(g_fGstShwFlush) ((void)0)
3135
3136/** @} */
3137
3138#endif /* !VMM_INCLUDED_SRC_include_IEMMc_h */
3139