VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMMc.h@ 100847

Last change on this file since 100847 was 100847, checked in by vboxsync, 20 months ago

VMM/IEM: Dedicated code for IEM_MC_MEM_COMMIT_AND_UNMAP_RW/WO/RO. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 135.9 KB
Line 
1/* $Id: IEMMc.h 100847 2023-08-09 23:27:22Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - IEM_MC_XXX.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMMc_h
29#define VMM_INCLUDED_SRC_include_IEMMc_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34
35/** @name "Microcode" macros.
36 *
37 * The idea is that we should be able to use the same code to interpret
38 * instructions as well as recompiler instructions. Thus this obfuscation.
39 *
40 * @{
41 */
42#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
43#define IEM_MC_END() }
44
45/** Internal macro. */
46#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
47 do \
48 { \
49 VBOXSTRICTRC rcStrict2 = a_Expr; \
50 if (rcStrict2 == VINF_SUCCESS) \
51 { /* likely */ } \
52 else \
53 return rcStrict2; \
54 } while (0)
55
56
57/** Advances RIP, finishes the instruction and returns.
58 * This may include raising debug exceptions and such. */
59#define IEM_MC_ADVANCE_RIP_AND_FINISH() return iemRegAddToRipAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
60/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
61#define IEM_MC_REL_JMP_S8_AND_FINISH(a_i8) \
62 return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i8), pVCpu->iem.s.enmEffOpSize)
63/** Sets RIP (may trigger \#GP), finishes the instruction and returns.
64 * @note only usable in 16-bit op size mode. */
65#define IEM_MC_REL_JMP_S16_AND_FINISH(a_i16) \
66 return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i16))
67/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
68#define IEM_MC_REL_JMP_S32_AND_FINISH(a_i32) \
69 return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i32), pVCpu->iem.s.enmEffOpSize)
70/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
71#define IEM_MC_SET_RIP_U16_AND_FINISH(a_u16NewIP) return iemRegRipJumpU16AndFinishClearningRF((pVCpu), (a_u16NewIP))
72/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
73#define IEM_MC_SET_RIP_U32_AND_FINISH(a_u32NewIP) return iemRegRipJumpU32AndFinishClearningRF((pVCpu), (a_u32NewIP))
74/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
75#define IEM_MC_SET_RIP_U64_AND_FINISH(a_u64NewIP) return iemRegRipJumpU64AndFinishClearningRF((pVCpu), (a_u64NewIP))
76
77#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
78#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
79 do { \
80 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)))) \
81 { /* probable */ } \
82 else return iemRaiseDeviceNotAvailable(pVCpu); \
83 } while (0)
84#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
85 do { \
86 if (RT_LIKELY(!((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)))) \
87 { /* probable */ } \
88 else return iemRaiseDeviceNotAvailable(pVCpu); \
89 } while (0)
90#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
91 do { \
92 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES))) \
93 { /* probable */ } \
94 else return iemRaiseMathFault(pVCpu); \
95 } while (0)
96#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
97 do { \
98 /* Since none of the bits we compare from XCR0, CR4 and CR0 overlap, it can \
99 be reduced to a single compare branch in the more probably code path. */ \
100 if (RT_LIKELY( ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) \
101 | (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
102 | (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)) \
103 == (XSAVE_C_YMM | XSAVE_C_SSE | X86_CR4_OSXSAVE))) \
104 { /* probable */ } \
105 else if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
106 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)) \
107 return iemRaiseUndefinedOpcode(pVCpu); \
108 else \
109 return iemRaiseDeviceNotAvailable(pVCpu); \
110 } while (0)
111AssertCompile(!((XSAVE_C_YMM | XSAVE_C_SSE) & X86_CR4_OSXSAVE));
112AssertCompile(!((XSAVE_C_YMM | XSAVE_C_SSE) & X86_CR0_TS));
113AssertCompile(!(X86_CR4_OSXSAVE & X86_CR0_TS));
114#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
115 do { \
116 /* Since the CR4 and CR0 bits doesn't overlap, it can be reduced to a
117 single compare branch in the more probable code path. */ \
118 if (RT_LIKELY( ( (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
119 | (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR)) \
120 == X86_CR4_OSFXSR)) \
121 { /* likely */ } \
122 else if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
123 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR)) \
124 return iemRaiseUndefinedOpcode(pVCpu); \
125 else \
126 return iemRaiseDeviceNotAvailable(pVCpu); \
127 } while (0)
128AssertCompile(!((X86_CR0_EM | X86_CR0_TS) & X86_CR4_OSFXSR));
129#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
130 do { \
131 /* Since the two CR0 bits doesn't overlap with FSW.ES, this can be reduced to a
132 single compare branch in the more probable code path. */ \
133 if (RT_LIKELY(!( (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
134 | (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES)))) \
135 { /* probable */ } \
136 else if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
137 return iemRaiseUndefinedOpcode(pVCpu); \
138 else if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
139 return iemRaiseDeviceNotAvailable(pVCpu); \
140 else \
141 return iemRaiseMathFault(pVCpu); \
142 } while (0)
143AssertCompile(!((X86_CR0_EM | X86_CR0_TS) & X86_FSW_ES));
144#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
145 do { \
146 if (RT_LIKELY(IEM_GET_CPL(pVCpu) == 0)) { /* probable */ } \
147 else return iemRaiseGeneralProtectionFault0(pVCpu); \
148 } while (0)
149#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
150 do { \
151 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
152 else return iemRaiseGeneralProtectionFault0(pVCpu); \
153 } while (0)
154#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
155 do { \
156 if (RT_LIKELY( ((pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE) | IEM_GET_CPU_MODE(pVCpu)) \
157 == (X86_CR4_FSGSBASE | IEMMODE_64BIT))) \
158 { /* probable */ } \
159 else return iemRaiseUndefinedOpcode(pVCpu); \
160 } while (0)
161AssertCompile(X86_CR4_FSGSBASE > UINT8_MAX);
162#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
163 do { \
164 if (RT_LIKELY(IEM_IS_CANONICAL(a_u64Addr))) { /* likely */ } \
165 else return iemRaiseGeneralProtectionFault0(pVCpu); \
166 } while (0)
167#define IEM_MC_MAYBE_RAISE_SSE_AVX_SIMD_FP_OR_UD_XCPT() \
168 do { \
169 if (RT_LIKELY(( ~((pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
170 & (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)) \
171 { /* probable */ } \
172 else \
173 { \
174 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT) \
175 return iemRaiseSimdFpException(pVCpu); \
176 return iemRaiseUndefinedOpcode(pVCpu); \
177 } \
178 } while (0)
179#define IEM_MC_RAISE_SSE_AVX_SIMD_FP_OR_UD_XCPT() \
180 do { \
181 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT)\
182 return iemRaiseSimdFpException(pVCpu); \
183 return iemRaiseUndefinedOpcode(pVCpu); \
184 } while (0)
185
186
187#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
188#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
189#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
190#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
191#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
192#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
193#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
194 uint32_t a_Name; \
195 uint32_t *a_pName = &a_Name
196#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
197 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
198
199#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
200#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
201#define IEM_MC_ASSIGN_U8_SX_U64(a_u64VarOrArg, a_u8CVariableOrConst) \
202 (a_u64VarOrArg) = (int8_t)(a_u8CVariableOrConst)
203#define IEM_MC_ASSIGN_U32_SX_U64(a_u64VarOrArg, a_u32CVariableOrConst) \
204 (a_u64VarOrArg) = (int32_t)(a_u32CVariableOrConst)
205
206#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
207#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
208#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
209#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
210#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
211#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
212#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
213#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
214#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
215#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
216#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
217#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
218#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
219#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
220#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
221#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
222#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
223#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
224 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
225 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
226 } while (0)
227#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
228 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
229 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
230 } while (0)
231#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
232 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
233 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
234 } while (0)
235/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
236#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
237 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
238 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
239 } while (0)
240#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
241 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
242 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
243 } while (0)
244/** @note Not for IOPL or IF testing or modification. */
245#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
246#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
247#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.XState.x87.FSW
248#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.XState.x87.FCW
249
250#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
251#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
252#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
253#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
254#define IEM_MC_STORE_GREG_I64(a_iGReg, a_i64Value) *iemGRegRefI64(pVCpu, (a_iGReg)) = (a_i64Value)
255#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
256#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
257#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
258#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
259#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
260#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
261/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
262#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
263 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
264 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
265 } while (0)
266#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
267 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
268 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
269 } while (0)
270#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
271 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
272
273
274#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
275#define IEM_MC_REF_GREG_U8_CONST(a_pu8Dst, a_iGReg) (a_pu8Dst) = (uint8_t const *)iemGRegRefU8( pVCpu, (a_iGReg))
276#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
277#define IEM_MC_REF_GREG_U16_CONST(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t const *)iemGRegRefU16(pVCpu, (a_iGReg))
278/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
279 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
280#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
281#define IEM_MC_REF_GREG_U32_CONST(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t const *)iemGRegRefU32(pVCpu, (a_iGReg))
282#define IEM_MC_REF_GREG_I32(a_pi32Dst, a_iGReg) (a_pi32Dst) = (int32_t *)iemGRegRefU32(pVCpu, (a_iGReg))
283#define IEM_MC_REF_GREG_I32_CONST(a_pi32Dst, a_iGReg) (a_pi32Dst) = (int32_t const *)iemGRegRefU32(pVCpu, (a_iGReg))
284#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
285#define IEM_MC_REF_GREG_U64_CONST(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t const *)iemGRegRefU64(pVCpu, (a_iGReg))
286#define IEM_MC_REF_GREG_I64(a_pi64Dst, a_iGReg) (a_pi64Dst) = (int64_t *)iemGRegRefU64(pVCpu, (a_iGReg))
287#define IEM_MC_REF_GREG_I64_CONST(a_pi64Dst, a_iGReg) (a_pi64Dst) = (int64_t const *)iemGRegRefU64(pVCpu, (a_iGReg))
288/** @note Not for IOPL or IF testing or modification.
289 * @note Must preserve any undefined bits, see CPUMX86EFLAGS! */
290#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.uBoth
291#define IEM_MC_REF_MXCSR(a_pfMxcsr) (a_pfMxcsr) = &pVCpu->cpum.GstCtx.XState.x87.MXCSR
292
293#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
294#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
295#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
296 do { \
297 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
298 *pu32Reg += (a_u32Value); \
299 pu32Reg[1] = 0; /* implicitly clear the high bit. */ \
300 } while (0)
301#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
302
303#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
304#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
305#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
306 do { \
307 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
308 *pu32Reg -= (a_u32Value); \
309 pu32Reg[1] = 0; /* implicitly clear the high bit. */ \
310 } while (0)
311#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
312#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
313
314#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
315#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
316#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
317#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
318#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
319#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
320#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
321
322#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
323#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
324#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
325#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
326
327#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
328#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
329#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
330
331#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
332#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
333#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
334
335#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
336#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
337#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
338
339#define IEM_MC_SHR_LOCAL_U8(a_u8Local, a_cShift) do { (a_u8Local) >>= (a_cShift); } while (0)
340
341#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
342#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
343#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
344
345#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
346
347#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
348
349#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
350#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
351#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
352 do { \
353 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
354 *pu32Reg &= (a_u32Value); \
355 pu32Reg[1] = 0; /* implicitly clear the high bit. */ \
356 } while (0)
357#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
358
359#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
360#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
361#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
362 do { \
363 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
364 *pu32Reg |= (a_u32Value); \
365 pu32Reg[1] = 0; /* implicitly clear the high bit. */ \
366 } while (0)
367#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
368
369#define IEM_MC_BSWAP_LOCAL_U16(a_u16Local) (a_u16Local) = RT_BSWAP_U16((a_u16Local));
370#define IEM_MC_BSWAP_LOCAL_U32(a_u32Local) (a_u32Local) = RT_BSWAP_U32((a_u32Local));
371#define IEM_MC_BSWAP_LOCAL_U64(a_u64Local) (a_u64Local) = RT_BSWAP_U64((a_u64Local));
372
373/** @note Not for IOPL or IF modification. */
374#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
375/** @note Not for IOPL or IF modification. */
376#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
377/** @note Not for IOPL or IF modification. */
378#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
379
380#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.XState.x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
381
382/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
383#define IEM_MC_FPU_TO_MMX_MODE() do { \
384 iemFpuRotateStackSetTop(&pVCpu->cpum.GstCtx.XState.x87, 0); \
385 pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
386 pVCpu->cpum.GstCtx.XState.x87.FTW = 0xff; \
387 } while (0)
388
389/** Switches the FPU state from MMX mode (FSW.TOS=0, FTW=0xffff). */
390#define IEM_MC_FPU_FROM_MMX_MODE() do { \
391 iemFpuRotateStackSetTop(&pVCpu->cpum.GstCtx.XState.x87, 0); \
392 pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
393 pVCpu->cpum.GstCtx.XState.x87.FTW = 0; \
394 } while (0)
395
396#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
397 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx; } while (0)
398#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
399 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[0]; } while (0)
400#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
401 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
402 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
403 } while (0)
404#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
405 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
406 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
407 } while (0)
408#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
409 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
410#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
411 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
412#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
413 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
414#define IEM_MC_MODIFIED_MREG(a_iMReg) \
415 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; } while (0)
416#define IEM_MC_MODIFIED_MREG_BY_REF(a_pu64Dst) \
417 do { ((uint32_t *)(a_pu64Dst))[2] = 0xffff; } while (0)
418
419#define IEM_MC_CLEAR_XREG_U32_MASK(a_iXReg, a_bMask) \
420 do { if ((a_bMask) & (1 << 0)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0] = 0; \
421 if ((a_bMask) & (1 << 1)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[1] = 0; \
422 if ((a_bMask) & (1 << 2)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[2] = 0; \
423 if ((a_bMask) & (1 << 3)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[3] = 0; \
424 } while (0)
425#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
426 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
427 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
428 } while (0)
429#define IEM_MC_FETCH_XREG_XMM(a_XmmValue, a_iXReg) \
430 do { (a_XmmValue).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
431 (a_XmmValue).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
432 } while (0)
433#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg, a_iQWord) \
434 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQWord)]; } while (0)
435#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg, a_iDWord) \
436 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDWord)]; } while (0)
437#define IEM_MC_FETCH_XREG_U16(a_u16Value, a_iXReg, a_iWord) \
438 do { (a_u16Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au16[(a_iWord)]; } while (0)
439#define IEM_MC_FETCH_XREG_U8( a_u8Value, a_iXReg, a_iByte) \
440 do { (a_u8Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au16[(a_iByte)]; } while (0)
441#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
442 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
443 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
444 } while (0)
445#define IEM_MC_STORE_XREG_XMM(a_iXReg, a_XmmValue) \
446 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_XmmValue).au64[0]; \
447 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_XmmValue).au64[1]; \
448 } while (0)
449#define IEM_MC_STORE_XREG_XMM_U32(a_iXReg, a_iDword, a_XmmValue) \
450 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDword)] = (a_XmmValue).au32[(a_iDword)]; } while (0)
451#define IEM_MC_STORE_XREG_XMM_U64(a_iXReg, a_iQword, a_XmmValue) \
452 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQword)] = (a_XmmValue).au64[(a_iQword)]; } while (0)
453#define IEM_MC_STORE_XREG_U64(a_iXReg, a_iQword, a_u64Value) \
454 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQword)] = (a_u64Value); } while (0)
455#define IEM_MC_STORE_XREG_U32(a_iXReg, a_iDword, a_u32Value) \
456 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDword)] = (a_u32Value); } while (0)
457#define IEM_MC_STORE_XREG_U16(a_iXReg, a_iWord, a_u16Value) \
458 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iWord)] = (a_u16Value); } while (0)
459#define IEM_MC_STORE_XREG_U8(a_iXReg, a_iByte, a_u8Value) \
460 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iByte)] = (a_u8Value); } while (0)
461
462#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
463 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
464 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
465 } while (0)
466
467#define IEM_MC_STORE_XREG_U32_U128(a_iXReg, a_iDwDst, a_u128Value, a_iDwSrc) \
468 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDwDst)] = (a_u128Value).au32[(a_iDwSrc)]; } while (0)
469#define IEM_MC_STORE_XREG_R32(a_iXReg, a_r32Value) \
470 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[0] = (a_r32Value); } while (0)
471#define IEM_MC_STORE_XREG_R64(a_iXReg, a_r64Value) \
472 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[0] = (a_r64Value); } while (0)
473#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
474 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
475 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
476 } while (0)
477#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
478 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
479
480#define IEM_MC_BROADCAST_XREG_U8_ZX_VLMAX(a_iXRegDst, a_u8Src) \
481 do { uintptr_t const iXRegDstTmp = (a_iXRegDst); \
482 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[0] = (a_u8Src); \
483 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[1] = (a_u8Src); \
484 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[2] = (a_u8Src); \
485 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[3] = (a_u8Src); \
486 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[4] = (a_u8Src); \
487 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[5] = (a_u8Src); \
488 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[6] = (a_u8Src); \
489 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[7] = (a_u8Src); \
490 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[8] = (a_u8Src); \
491 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[9] = (a_u8Src); \
492 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[10] = (a_u8Src); \
493 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[11] = (a_u8Src); \
494 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[12] = (a_u8Src); \
495 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[13] = (a_u8Src); \
496 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[14] = (a_u8Src); \
497 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[15] = (a_u8Src); \
498 IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
499 } while (0)
500#define IEM_MC_BROADCAST_XREG_U16_ZX_VLMAX(a_iXRegDst, a_u16Src) \
501 do { uintptr_t const iXRegDstTmp = (a_iXRegDst); \
502 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[0] = (a_u16Src); \
503 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[1] = (a_u16Src); \
504 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[2] = (a_u16Src); \
505 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[3] = (a_u16Src); \
506 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[4] = (a_u16Src); \
507 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[5] = (a_u16Src); \
508 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[6] = (a_u16Src); \
509 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[7] = (a_u16Src); \
510 IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
511 } while (0)
512#define IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX(a_iXRegDst, a_u32Src) \
513 do { uintptr_t const iXRegDstTmp = (a_iXRegDst); \
514 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[0] = (a_u32Src); \
515 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[1] = (a_u32Src); \
516 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[2] = (a_u32Src); \
517 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[3] = (a_u32Src); \
518 IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
519 } while (0)
520#define IEM_MC_BROADCAST_XREG_U64_ZX_VLMAX(a_iXRegDst, a_u64Src) \
521 do { uintptr_t const iXRegDstTmp = (a_iXRegDst); \
522 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au64[0] = (a_u64Src); \
523 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au64[1] = (a_u64Src); \
524 IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
525 } while (0)
526
527#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
528 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
529#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
530 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
531#define IEM_MC_REF_XREG_XMM_CONST(a_pXmmDst, a_iXReg) \
532 (a_pXmmDst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)])
533#define IEM_MC_REF_XREG_U32_CONST(a_pu32Dst, a_iXReg) \
534 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0])
535#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
536 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0])
537#define IEM_MC_REF_XREG_R32_CONST(a_pr32Dst, a_iXReg) \
538 (a_pr32Dst) = ((RTFLOAT32U const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[0])
539#define IEM_MC_REF_XREG_R64_CONST(a_pr64Dst, a_iXReg) \
540 (a_pr64Dst) = ((RTFLOAT64U const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[0])
541#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
542 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[0] \
543 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[0]; \
544 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[1] \
545 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[1]; \
546 } while (0)
547
548#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
549 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
550 (a_u32Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au32[0]; \
551 } while (0)
552#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
553 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
554 (a_u64Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
555 } while (0)
556#define IEM_MC_FETCH_YREG_2ND_U64(a_u64Dst, a_iYRegSrc) \
557 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
558 (a_u64Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
559 } while (0)
560#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
561 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
562 (a_u128Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
563 (a_u128Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
564 } while (0)
565#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
566 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
567 (a_u256Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
568 (a_u256Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
569 (a_u256Dst).au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
570 (a_u256Dst).au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
571 } while (0)
572
573#define IEM_MC_STORE_YREG_U128(a_iYRegDst, a_iDQword, a_u128Value) \
574 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
575 if ((a_iDQword) == 0) \
576 { \
577 pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au64[0] = (a_u128Value).au64[0]; \
578 pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au64[1] = (a_u128Value).au64[1]; \
579 } \
580 else \
581 { \
582 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au64[0] = (a_u128Value).au64[0]; \
583 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au64[1] = (a_u128Value).au64[1]; \
584 } \
585 } while (0)
586
587#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
588#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
589 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
590 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
591 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = 0; \
592 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
593 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
594 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
595 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
596 } while (0)
597#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
598 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
599 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
600 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
601 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
602 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
603 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
604 } while (0)
605#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
606 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
607 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
608 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
609 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
610 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
611 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
612 } while (0)
613#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
614 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
615 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
616 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
617 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
618 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
619 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
620 } while (0)
621
622#define IEM_MC_BROADCAST_YREG_U8_ZX_VLMAX(a_iYRegDst, a_u8Src) \
623 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
624 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[0] = (a_u8Src); \
625 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[1] = (a_u8Src); \
626 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[2] = (a_u8Src); \
627 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[3] = (a_u8Src); \
628 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[4] = (a_u8Src); \
629 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[5] = (a_u8Src); \
630 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[6] = (a_u8Src); \
631 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[7] = (a_u8Src); \
632 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[8] = (a_u8Src); \
633 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[9] = (a_u8Src); \
634 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[10] = (a_u8Src); \
635 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[11] = (a_u8Src); \
636 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[12] = (a_u8Src); \
637 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[13] = (a_u8Src); \
638 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[14] = (a_u8Src); \
639 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[15] = (a_u8Src); \
640 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[0] = (a_u8Src); \
641 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[1] = (a_u8Src); \
642 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[2] = (a_u8Src); \
643 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[3] = (a_u8Src); \
644 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[4] = (a_u8Src); \
645 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[5] = (a_u8Src); \
646 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[6] = (a_u8Src); \
647 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[7] = (a_u8Src); \
648 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[8] = (a_u8Src); \
649 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[9] = (a_u8Src); \
650 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[10] = (a_u8Src); \
651 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[11] = (a_u8Src); \
652 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[12] = (a_u8Src); \
653 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[13] = (a_u8Src); \
654 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[14] = (a_u8Src); \
655 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[15] = (a_u8Src); \
656 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
657 } while (0)
658#define IEM_MC_BROADCAST_YREG_U16_ZX_VLMAX(a_iYRegDst, a_u16Src) \
659 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
660 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[0] = (a_u16Src); \
661 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[1] = (a_u16Src); \
662 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[2] = (a_u16Src); \
663 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[3] = (a_u16Src); \
664 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[4] = (a_u16Src); \
665 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[5] = (a_u16Src); \
666 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[6] = (a_u16Src); \
667 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[7] = (a_u16Src); \
668 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[0] = (a_u16Src); \
669 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[1] = (a_u16Src); \
670 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[2] = (a_u16Src); \
671 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[3] = (a_u16Src); \
672 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[4] = (a_u16Src); \
673 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[5] = (a_u16Src); \
674 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[6] = (a_u16Src); \
675 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[7] = (a_u16Src); \
676 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
677 } while (0)
678#define IEM_MC_BROADCAST_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
679 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
680 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
681 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = (a_u32Src); \
682 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[2] = (a_u32Src); \
683 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[3] = (a_u32Src); \
684 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[0] = (a_u32Src); \
685 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[1] = (a_u32Src); \
686 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[2] = (a_u32Src); \
687 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[3] = (a_u32Src); \
688 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
689 } while (0)
690#define IEM_MC_BROADCAST_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
691 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
692 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
693 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u64Src); \
694 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u64Src); \
695 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u64Src); \
696 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
697 } while (0)
698#define IEM_MC_BROADCAST_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
699 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
700 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
701 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
702 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
703 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
704 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
705 } while (0)
706
707#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
708 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
709#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
710 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
711#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
712 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].au64[0])
713#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
714 do { uintptr_t const iYRegTmp = (a_iYReg); \
715 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
716 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
717 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegTmp); \
718 } while (0)
719
720#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
721 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
722 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
723 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
724 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
725 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
726 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
727 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
728 } while (0)
729#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
730 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
731 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
732 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
733 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
734 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
735 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
736 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
737 } while (0)
738#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
739 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
740 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
741 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
742 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
743 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
744 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
745 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
746 } while (0)
747
748#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
749 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
750 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
751 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
752 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc32Tmp].au32[0]; \
753 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au32[1]; \
754 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
755 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
756 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
757 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
758 } while (0)
759#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
760 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
761 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
762 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
763 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
764 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
765 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
766 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
767 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
768 } while (0)
769#define IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
770 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
771 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
772 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
773 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
774 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[0]; \
775 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
776 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
777 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
778 } while (0)
779#define IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
780 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
781 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
782 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
783 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[1]; \
784 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
785 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
786 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
787 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
788 } while (0)
789#define IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX(a_iYRegDst, a_iYRegSrcHx, a_u64Local) \
790 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
791 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
792 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[0]; \
793 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u64Local); \
794 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
795 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
796 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
797 } while (0)
798#define IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
799 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
800 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
801 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
802 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
803 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
804 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
805 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
806 } while (0)
807
808#ifndef IEM_WITH_SETJMP
809# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
810 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
811# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
812 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
813# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
814 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
815#else
816# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
817 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
818# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
819 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
820# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
821 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
822
823# define IEM_MC_FETCH_MEM_FLAT_U8(a_u8Dst, a_GCPtrMem) \
824 ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
825# define IEM_MC_FETCH_MEM16_FLAT_U8(a_u8Dst, a_GCPtrMem16) \
826 ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem16)))
827# define IEM_MC_FETCH_MEM32_FLAT_U8(a_u8Dst, a_GCPtrMem32) \
828 ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem32)))
829#endif
830
831#ifndef IEM_WITH_SETJMP
832# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
833 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
834# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
835 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
836# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
837 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
838#else
839# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
840 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
841# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
842 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
843# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
844 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
845
846# define IEM_MC_FETCH_MEM_FLAT_U16(a_u16Dst, a_GCPtrMem) \
847 ((a_u16Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
848# define IEM_MC_FETCH_MEM_FLAT_U16_DISP(a_u16Dst, a_GCPtrMem, a_offDisp) \
849 ((a_u16Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
850# define IEM_MC_FETCH_MEM_FLAT_I16(a_i16Dst, a_GCPtrMem) \
851 ((a_i16Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
852#endif
853
854#ifndef IEM_WITH_SETJMP
855# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
856 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
857# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
858 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
859# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
860 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
861#else
862# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
863 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
864# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
865 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
866# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
867 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
868
869# define IEM_MC_FETCH_MEM_FLAT_U32(a_u32Dst, a_GCPtrMem) \
870 ((a_u32Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
871# define IEM_MC_FETCH_MEM_FLAT_U32_DISP(a_u32Dst, a_GCPtrMem, a_offDisp) \
872 ((a_u32Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
873# define IEM_MC_FETCH_MEM_FLAT_I32(a_i32Dst, a_GCPtrMem) \
874 ((a_i32Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
875#endif
876
877#ifdef SOME_UNUSED_FUNCTION
878# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
879 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
880#endif
881
882#ifndef IEM_WITH_SETJMP
883# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
884 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
885# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
886 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
887# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
888 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
889# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
890 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
891#else
892# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
893 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
894# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
895 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
896# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
897 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
898# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
899 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
900
901# define IEM_MC_FETCH_MEM_FLAT_U64(a_u64Dst, a_GCPtrMem) \
902 ((a_u64Dst) = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
903# define IEM_MC_FETCH_MEM_FLAT_U64_DISP(a_u64Dst, a_GCPtrMem, a_offDisp) \
904 ((a_u64Dst) = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
905# define IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128(a_u64Dst, a_GCPtrMem) \
906 ((a_u64Dst) = iemMemFlatFetchDataU64AlignedU128Jmp(pVCpu, (a_GCPtrMem)))
907# define IEM_MC_FETCH_MEM_FLAT_I64(a_i64Dst, a_GCPtrMem) \
908 ((a_i64Dst) = (int64_t)iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
909#endif
910
911#ifndef IEM_WITH_SETJMP
912# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
913 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u, (a_iSeg), (a_GCPtrMem)))
914# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
915 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).u, (a_iSeg), (a_GCPtrMem)))
916# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
917 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
918# define IEM_MC_FETCH_MEM_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \
919 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataD80(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem)))
920#else
921# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
922 ((a_r32Dst).u = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
923# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
924 ((a_r64Dst).u = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
925# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
926 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
927# define IEM_MC_FETCH_MEM_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \
928 iemMemFetchDataD80Jmp(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem))
929
930# define IEM_MC_FETCH_MEM_FLAT_R32(a_r32Dst, a_GCPtrMem) \
931 ((a_r32Dst).u = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
932# define IEM_MC_FETCH_MEM_FLAT_R64(a_r64Dst, a_GCPtrMem) \
933 ((a_r64Dst).u = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
934# define IEM_MC_FETCH_MEM_FLAT_R80(a_r80Dst, a_GCPtrMem) \
935 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), UINT8_MAX, (a_GCPtrMem))
936# define IEM_MC_FETCH_MEM_FLAT_D80(a_d80Dst, a_GCPtrMem) \
937 iemMemFetchDataD80Jmp(pVCpu, &(a_d80Dst), UINT8_MAX, (a_GCPtrMem))
938#endif
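/* Note (descriptive, derived from the macros above): the R32/R64 variants fetch
 * the raw bits into the destination union's .u member, and the R80/D80 variants
 * fill the destination structure directly, so the memory fetch itself performs
 * no floating-point conversion and cannot raise an FPU exception. */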
939
940#ifndef IEM_WITH_SETJMP
941# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
942 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
943# define IEM_MC_FETCH_MEM_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \
944 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
945# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
946 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
947
948# define IEM_MC_FETCH_MEM_XMM(a_XmmDst, a_iSeg, a_GCPtrMem) \
949 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)))
950# define IEM_MC_FETCH_MEM_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \
951 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)))
952# define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \
953 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)))
954# define IEM_MC_FETCH_MEM_XMM_U32(a_XmmDst, a_iDWord, a_iSeg, a_GCPtrMem) \
955 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_XmmDst).au32[(a_iDWord)], (a_iSeg), (a_GCPtrMem)))
956# define IEM_MC_FETCH_MEM_XMM_U64(a_XmmDst, a_iQWord, a_iSeg, a_GCPtrMem) \
957 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_XmmDst).au64[(a_iQWord)], (a_iSeg), (a_GCPtrMem)))
958#else
959# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
960 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
961# define IEM_MC_FETCH_MEM_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \
962 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
963# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
964 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
965
966# define IEM_MC_FETCH_MEM_XMM(a_XmmDst, a_iSeg, a_GCPtrMem) \
967 iemMemFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
968# define IEM_MC_FETCH_MEM_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \
969 iemMemFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
970# define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \
971 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
972# define IEM_MC_FETCH_MEM_XMM_U32(a_XmmDst, a_iDWord, a_iSeg, a_GCPtrMem) \
973 (a_XmmDst).au32[(a_iDWord)] = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))
974# define IEM_MC_FETCH_MEM_XMM_U64(a_XmmDst, a_iQWord, a_iSeg, a_GCPtrMem) \
975 (a_XmmDst).au64[(a_iQWord)] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))
976
977# define IEM_MC_FETCH_MEM_FLAT_U128(a_u128Dst, a_GCPtrMem) \
978 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), UINT8_MAX, (a_GCPtrMem))
979# define IEM_MC_FETCH_MEM_FLAT_U128_NO_AC(a_u128Dst, a_GCPtrMem) \
980 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), UINT8_MAX, (a_GCPtrMem))
981# define IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE(a_u128Dst, a_GCPtrMem) \
982 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), UINT8_MAX, (a_GCPtrMem))
983
984# define IEM_MC_FETCH_MEM_FLAT_XMM(a_XmmDst, a_GCPtrMem) \
985 iemMemFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, UINT8_MAX, (a_GCPtrMem))
986# define IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC(a_XmmDst, a_GCPtrMem) \
987 iemMemFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, UINT8_MAX, (a_GCPtrMem))
988# define IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE(a_XmmDst, a_GCPtrMem) \
989 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_XmmDst).uXmm, UINT8_MAX, (a_GCPtrMem))
990# define IEM_MC_FETCH_MEM_FLAT_XMM_U32(a_XmmDst, a_iDWord, a_GCPtrMem) \
991 (a_XmmDst).au32[(a_iDWord)] = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem))
992# define IEM_MC_FETCH_MEM_FLAT_XMM_U64(a_XmmDst, a_iQWord, a_GCPtrMem) \
993 (a_XmmDst).au64[(a_iQWord)] = iemMemFetchDataU64Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem))
994#endif
995
996#ifndef IEM_WITH_SETJMP
997# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
998 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
999# define IEM_MC_FETCH_MEM_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \
1000 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
1001# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
1002 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
1003
1004# define IEM_MC_FETCH_MEM_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \
1005 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
1006# define IEM_MC_FETCH_MEM_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \
1007 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
1008# define IEM_MC_FETCH_MEM_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \
1009 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
1010#else
1011# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
1012 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
1013# define IEM_MC_FETCH_MEM_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \
1014 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
1015# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
1016 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
1017
1018# define IEM_MC_FETCH_MEM_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \
1019 iemMemFetchDataU256Jmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
1020# define IEM_MC_FETCH_MEM_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \
1021 iemMemFetchDataU256Jmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
1022# define IEM_MC_FETCH_MEM_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \
1023 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
1024
1025# define IEM_MC_FETCH_MEM_FLAT_U256(a_u256Dst, a_GCPtrMem) \
1026 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), UINT8_MAX, (a_GCPtrMem))
1027# define IEM_MC_FETCH_MEM_FLAT_U256_NO_AC(a_u256Dst, a_GCPtrMem) \
1028 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), UINT8_MAX, (a_GCPtrMem))
1029# define IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX(a_u256Dst, a_GCPtrMem) \
1030 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), UINT8_MAX, (a_GCPtrMem))
1031
1032# define IEM_MC_FETCH_MEM_FLAT_YMM(a_YmmDst, a_GCPtrMem) \
1033 iemMemFetchDataU256Jmp(pVCpu, &(a_YmmDst).ymm, UINT8_MAX, (a_GCPtrMem))
1034# define IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC(a_YmmDst, a_GCPtrMem) \
1035 iemMemFetchDataU256Jmp(pVCpu, &(a_YmmDst).ymm, UINT8_MAX, (a_GCPtrMem))
1036# define IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX(a_YmmDst, a_GCPtrMem) \
1037 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_YmmDst).ymm, UINT8_MAX, (a_GCPtrMem))
1038#endif
1039
1040
1041
1042#ifndef IEM_WITH_SETJMP
1043# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
1044 do { \
1045 uint8_t u8Tmp; \
1046 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1047 (a_u16Dst) = u8Tmp; \
1048 } while (0)
1049# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1050 do { \
1051 uint8_t u8Tmp; \
1052 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1053 (a_u32Dst) = u8Tmp; \
1054 } while (0)
1055# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1056 do { \
1057 uint8_t u8Tmp; \
1058 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1059 (a_u64Dst) = u8Tmp; \
1060 } while (0)
1061# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1062 do { \
1063 uint16_t u16Tmp; \
1064 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
1065 (a_u32Dst) = u16Tmp; \
1066 } while (0)
1067# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1068 do { \
1069 uint16_t u16Tmp; \
1070 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
1071 (a_u64Dst) = u16Tmp; \
1072 } while (0)
1073# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1074 do { \
1075 uint32_t u32Tmp; \
1076 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
1077 (a_u64Dst) = u32Tmp; \
1078 } while (0)
1079#else /* IEM_WITH_SETJMP */
1080# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
1081 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1082# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1083 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1084# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1085 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1086# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1087 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1088# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1089 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1090# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1091 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1092
1093# define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16(a_u16Dst, a_GCPtrMem) \
1094 ((a_u16Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1095# define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32(a_u32Dst, a_GCPtrMem) \
1096 ((a_u32Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1097# define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64(a_u64Dst, a_GCPtrMem) \
1098 ((a_u64Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1099# define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32(a_u32Dst, a_GCPtrMem) \
1100 ((a_u32Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1101# define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64(a_u64Dst, a_GCPtrMem) \
1102 ((a_u64Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1103# define IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64(a_u64Dst, a_GCPtrMem) \
1104 ((a_u64Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
1105#endif /* IEM_WITH_SETJMP */
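/* Illustrative usage sketch for the zero-extending fetches (hypothetical
 * MOVZX-style body; IEM_MC_LOCAL, IEM_MC_STORE_GREG_U32, iGRegDst and
 * GCPtrEffSrc are assumptions for illustration, not defined in this file):
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint32_t, u32Value);
 *     IEM_MC_FETCH_MEM_U16_ZX_U32(u32Value, X86_SREG_DS, GCPtrEffSrc);
 *     IEM_MC_STORE_GREG_U32(iGRegDst, u32Value);
 *     IEM_MC_ADVANCE_RIP_AND_FINISH();
 *     IEM_MC_END();
 */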
1106
1107#ifndef IEM_WITH_SETJMP
1108# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
1109 do { \
1110 uint8_t u8Tmp; \
1111 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1112 (a_u16Dst) = (int8_t)u8Tmp; \
1113 } while (0)
1114# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1115 do { \
1116 uint8_t u8Tmp; \
1117 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1118 (a_u32Dst) = (int8_t)u8Tmp; \
1119 } while (0)
1120# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1121 do { \
1122 uint8_t u8Tmp; \
1123 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
1124 (a_u64Dst) = (int8_t)u8Tmp; \
1125 } while (0)
1126# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1127 do { \
1128 uint16_t u16Tmp; \
1129 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
1130 (a_u32Dst) = (int16_t)u16Tmp; \
1131 } while (0)
1132# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1133 do { \
1134 uint16_t u16Tmp; \
1135 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
1136 (a_u64Dst) = (int16_t)u16Tmp; \
1137 } while (0)
1138# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1139 do { \
1140 uint32_t u32Tmp; \
1141 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
1142 (a_u64Dst) = (int32_t)u32Tmp; \
1143 } while (0)
1144#else /* IEM_WITH_SETJMP */
1145# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
1146 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1147# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1148 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1149# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1150 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1151# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1152 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1153# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1154 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1155# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1156 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1157
1158# define IEM_MC_FETCH_MEM_FLAT_U8_SX_U16(a_u16Dst, a_GCPtrMem) \
1159 ((a_u16Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1160# define IEM_MC_FETCH_MEM_FLAT_U8_SX_U32(a_u32Dst, a_GCPtrMem) \
1161 ((a_u32Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1162# define IEM_MC_FETCH_MEM_FLAT_U8_SX_U64(a_u64Dst, a_GCPtrMem) \
1163 ((a_u64Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1164# define IEM_MC_FETCH_MEM_FLAT_U16_SX_U32(a_u32Dst, a_GCPtrMem) \
1165 ((a_u32Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1166# define IEM_MC_FETCH_MEM_FLAT_U16_SX_U64(a_u64Dst, a_GCPtrMem) \
1167 ((a_u64Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1168# define IEM_MC_FETCH_MEM_FLAT_U32_SX_U64(a_u64Dst, a_GCPtrMem) \
1169 ((a_u64Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
1170#endif /* IEM_WITH_SETJMP */
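/* Expansion sketch (illustrative, not generated output): with IEM_WITH_SETJMP
 * defined, IEM_MC_FETCH_MEM_U16_SX_U32(u32Dst, X86_SREG_DS, GCPtrMem) becomes
 * roughly
 *     u32Dst = (int16_t)iemMemFetchDataU16Jmp(pVCpu, X86_SREG_DS, GCPtrMem);
 * i.e. the 16-bit value is first sign-extended via the int16_t cast and then
 * implicitly converted back to the unsigned destination width. */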
1171
1172#ifndef IEM_WITH_SETJMP
1173# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
1174 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
1175# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
1176 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
1177# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
1178 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
1179# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
1180 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
1181#else
1182# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
1183 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
1184# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
1185 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
1186# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
1187 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
1188# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
1189 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
1190
1191# define IEM_MC_STORE_MEM_FLAT_U8(a_GCPtrMem, a_u8Value) \
1192 iemMemFlatStoreDataU8Jmp(pVCpu, (a_GCPtrMem), (a_u8Value))
1193# define IEM_MC_STORE_MEM_FLAT_U16(a_GCPtrMem, a_u16Value) \
1194 iemMemFlatStoreDataU16Jmp(pVCpu, (a_GCPtrMem), (a_u16Value))
1195# define IEM_MC_STORE_MEM_FLAT_U32(a_GCPtrMem, a_u32Value) \
1196 iemMemFlatStoreDataU32Jmp(pVCpu, (a_GCPtrMem), (a_u32Value))
1197# define IEM_MC_STORE_MEM_FLAT_U64(a_GCPtrMem, a_u64Value) \
1198 iemMemFlatStoreDataU64Jmp(pVCpu, (a_GCPtrMem), (a_u64Value))
1199#endif
1200
1201#ifndef IEM_WITH_SETJMP
1202# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
1203 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
1204# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
1205 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
1206# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
1207 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
1208# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
1209 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
1210#else
1211# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
1212 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
1213# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
1214 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
1215# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
1216 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
1217# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
1218 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
1219
1220# define IEM_MC_STORE_MEM_FLAT_U8_CONST(a_GCPtrMem, a_u8C) \
1221 iemMemFlatStoreDataU8Jmp(pVCpu, (a_GCPtrMem), (a_u8C))
1222# define IEM_MC_STORE_MEM_FLAT_U16_CONST(a_GCPtrMem, a_u16C) \
1223 iemMemFlatStoreDataU16Jmp(pVCpu, (a_GCPtrMem), (a_u16C))
1224# define IEM_MC_STORE_MEM_FLAT_U32_CONST(a_GCPtrMem, a_u32C) \
1225 iemMemFlatStoreDataU32Jmp(pVCpu, (a_GCPtrMem), (a_u32C))
1226# define IEM_MC_STORE_MEM_FLAT_U64_CONST(a_GCPtrMem, a_u64C) \
1227 iemMemFlatStoreDataU64Jmp(pVCpu, (a_GCPtrMem), (a_u64C))
1228#endif
1229
1230#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
1231#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
1232#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
1233#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
1234#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u = UINT32_C(0xffc00000)
1235#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->u = UINT64_C(0xfff8000000000000)
1236#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
1237 do { \
1238 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
1239 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
1240 } while (0)
1241#define IEM_MC_STORE_MEM_INDEF_D80_BY_REF(a_pd80Dst) \
1242 do { \
1243 (a_pd80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
1244 (a_pd80Dst)->au16[4] = UINT16_C(0xffff); \
1245 } while (0)
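/* For reference: 0xffc00000 and 0xfff8000000000000 are the standard negative
 * QNaN encodings for single and double precision (sign bit set, all exponent
 * bits set, top mantissa bit set), while the patterns stored by the R80/D80
 * macros above (sign/exponent word 0xffff, mantissa 0xc000000000000000)
 * correspond to the x87 real/packed-BCD "indefinite" encodings used when a
 * masked invalid-operation condition still requires something to be stored. */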
1246
1247#ifndef IEM_WITH_SETJMP
1248# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
1249 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
1250# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
1251 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
1252#else
1253# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
1254 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
1255# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
1256 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
1257
1258# define IEM_MC_STORE_MEM_FLAT_U128(a_GCPtrMem, a_u128Value) \
1259 iemMemStoreDataU128Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem), (a_u128Value))
1260# define IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE(a_GCPtrMem, a_u128Value) \
1261 iemMemStoreDataU128AlignedSseJmp(pVCpu, UINT8_MAX, (a_GCPtrMem), (a_u128Value))
1262#endif
1263
1264#ifndef IEM_WITH_SETJMP
1265# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
1266 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
1267# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
1268 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
1269#else
1270# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
1271 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
1272# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
1273 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
1274
1275# define IEM_MC_STORE_MEM_FLAT_U256(a_GCPtrMem, a_u256Value) \
1276 iemMemStoreDataU256Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem), &(a_u256Value))
1277# define IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX(a_GCPtrMem, a_u256Value) \
1278 iemMemStoreDataU256AlignedAvxJmp(pVCpu, UINT8_MAX, (a_GCPtrMem), &(a_u256Value))
1279#endif
1280
1281/* Regular stack push and pop: */
1282#define IEM_MC_PUSH_U16(a_u16Value) \
1283 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
1284#define IEM_MC_PUSH_U32(a_u32Value) \
1285 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
1286#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
1287 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
1288#define IEM_MC_PUSH_U64(a_u64Value) \
1289 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
1290
1291#define IEM_MC_POP_U16(a_pu16Value) \
1292 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
1293#define IEM_MC_POP_U32(a_pu32Value) \
1294 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
1295#define IEM_MC_POP_U64(a_pu64Value) \
1296 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
1297
1298#define IEM_MC_POP_EX_U16(a_pu16Value, a_pNewRsp) \
1299 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16Ex(pVCpu, (a_pu16Value), (a_pNewRsp)))
1300#define IEM_MC_POP_EX_U32(a_pu32Value) \
1301 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
1302#define IEM_MC_POP_EX_U64(a_pu64Value) \
1303 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
1304
1305/* Flat stack push and pop: */
1306#define IEM_MC_FLAT_PUSH_U16(a_u16Value) \
1307 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
1308#define IEM_MC_FLAT_PUSH_U32(a_u32Value) \
1309 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
1310#define IEM_MC_FLAT_PUSH_U32_SREG(a_u32Value) \
1311 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
1312#define IEM_MC_FLAT_PUSH_U64(a_u64Value) \
1313 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
1314
1315#define IEM_MC_FLAT_POP_U16(a_pu16Value) \
1316 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
1317#define IEM_MC_FLAT_POP_U32(a_pu32Value) \
1318 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
1319#define IEM_MC_FLAT_POP_U64(a_pu64Value) \
1320 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
1321
1322#define IEM_MC_FLAT_POP_EX_U16(a_pu16Value, a_pNewRsp) \
1323 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16Ex(pVCpu, (a_pu16Value), (a_pNewRsp)))
1324#define IEM_MC_FLAT_POP_EX_U32(a_pu32Value) \
1325 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
1326#define IEM_MC_FLAT_POP_EX_U64(a_pu64Value) \
1327 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
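/* Illustrative usage sketch for the stack push helpers (hypothetical PUSH-style
 * body; IEM_MC_LOCAL, IEM_MC_FETCH_GREG_U16 and iGRegSrc are assumptions for
 * illustration, not defined in this file):
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, iGRegSrc);
 *     IEM_MC_PUSH_U16(u16Value);
 *     IEM_MC_ADVANCE_RIP_AND_FINISH();
 *     IEM_MC_END();
 */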
1328
1329
1330/** Maps guest memory for direct or bounce buffered access.
1331 * The purpose is to pass it to an operand implementation, thus the a_iArg.
1332 * @remarks May return.
1333 * @deprecated
1334 */
1335#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
1336 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), \
1337 (a_GCPtrMem), (a_fAccess), sizeof(*(a_pMem)) - 1))
1338
1339/** Flat variant of IEM_MC_MEM_MAP.
1340 * @deprecated
1341 */
1342#define IEM_MC_MEM_FLAT_MAP(a_pMem, a_fAccess, a_GCPtrMem, a_iArg) \
1343 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), UINT8_MAX, \
1344 (a_GCPtrMem), (a_fAccess), sizeof(*(a_pMem)) - 1))
1345
1346/** Maps guest memory for direct or bounce buffered access.
1347 * The purpose is to pass it to an operand implementation, thus the a_iArg.
1348 * @remarks May return.
1349 * @deprecated
1350 */
1351#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_cbAlign, a_iArg) \
1352 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), \
1353 (a_GCPtrMem), (a_fAccess), (a_cbAlign)))
1354
1355/** Flat variant of IEM_MC_MEM_MAP_EX.
1356 * @deprecated
1357 */
1358#define IEM_MC_MEM_FLAT_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_GCPtrMem, a_cbAlign, a_iArg) \
1359 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), UINT8_MAX, \
1360 (a_GCPtrMem), (a_fAccess), (a_cbAlign)))
1361
1362/** Commits the memory and unmaps the guest memory.
1363 * @remarks May return.
1364 * @deprecated
1365 */
1366#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
1367 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
1368
1369
1370/* 8-bit */
1371
1372/**
1373 * Maps guest memory for byte read+write direct (or bounce) buffer access.
1374 *
1375 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1376 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1377 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1378 * @param[in] a_GCPtrMem The memory address.
1379 * @remarks Will return/long jump on errors.
1380 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1381 */
1382#ifndef IEM_WITH_SETJMP
1383# define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
1384 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), sizeof(uint8_t), (a_iSeg), \
1385 (a_GCPtrMem), IEM_ACCESS_DATA_RW, 0)); \
1386 a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \
1387 } while (0)
1388#else
1389# define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1390 (a_pu8Mem) = iemMemMapDataU8RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1391#endif
1392
1393/**
1394 * Maps guest memory for byte writeonly direct (or bounce) buffer access.
1395 *
1396 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1397 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1398 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1399 * @param[in] a_GCPtrMem The memory address.
1400 * @remarks Will return/long jump on errors.
1401 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1402 */
1403#ifndef IEM_WITH_SETJMP
1404# define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
1405 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), sizeof(uint8_t), (a_iSeg), \
1406 (a_GCPtrMem), IEM_ACCESS_DATA_W, 0)); \
1407 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
1408 } while (0)
1409#else
1410# define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1411 (a_pu8Mem) = iemMemMapDataU8WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1412#endif
1413
1414/**
1415 * Maps guest memory for byte readonly direct (or bounce) buffer access.
1416 *
1417 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1418 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1419 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1420 * @param[in] a_GCPtrMem The memory address.
1421 * @remarks Will return/long jump on errors.
1422 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1423 */
1424#ifndef IEM_WITH_SETJMP
1425# define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
1426 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), sizeof(uint8_t), (a_iSeg), \
1427 (a_GCPtrMem), IEM_ACCESS_DATA_R, 0)); \
1428 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \
1429 } while (0)
1430#else
1431# define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1432 (a_pu8Mem) = iemMemMapDataU8RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1433#endif
1434
1435/**
1436 * Maps guest memory for byte read+write direct (or bounce) buffer access, flat
1437 * address variant.
1438 *
1439 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1440 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1441 * @param[in] a_GCPtrMem The memory address.
1442 * @remarks Will return/long jump on errors.
1443 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1444 */
1445#ifndef IEM_WITH_SETJMP
1446# define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) do { \
1447 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), sizeof(uint8_t), UINT8_MAX, \
1448 (a_GCPtrMem), IEM_ACCESS_DATA_RW, 0)); \
1449 a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \
1450 } while (0)
1451#else
1452# define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1453 (a_pu8Mem) = iemMemFlatMapDataU8RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1454#endif
1455
1456/**
1457 * Maps guest memory for byte writeonly direct (or bounce) buffer access, flat
1458 * address variant.
1459 *
1460 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1461 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1462 * @param[in] a_GCPtrMem The memory address.
1463 * @remarks Will return/long jump on errors.
1464 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1465 */
1466#ifndef IEM_WITH_SETJMP
1467# define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) do { \
1468 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), sizeof(uint8_t), UINT8_MAX, \
1469 (a_GCPtrMem), IEM_ACCESS_DATA_W, 0)); \
1470 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
1471 } while (0)
1472#else
1473# define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1474 (a_pu8Mem) = iemMemFlatMapDataU8WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1475#endif
1476
1477/**
1478 * Maps guest memory for byte readonly direct (or bounce) buffer access, flat
1479 * address variant.
1480 *
1481 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1482 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1483 * @param[in] a_GCPtrMem The memory address.
1484 * @remarks Will return/long jump on errors.
1485 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1486 */
1487#ifndef IEM_WITH_SETJMP
1488# define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) do { \
1489 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), sizeof(uint8_t), UINT8_MAX, \
1490 (a_GCPtrMem), IEM_ACCESS_DATA_R, 0)); \
1491 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \
1492 } while (0)
1493#else
1494# define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1495 (a_pu8Mem) = iemMemFlatMapDataU8RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1496#endif
1497
1498
1499/* 16-bit */
1500
1501/**
1502 * Maps guest memory for word read+write direct (or bounce) buffer access.
1503 *
1504 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1505 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1506 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1507 * @param[in] a_GCPtrMem The memory address.
1508 * @remarks Will return/long jump on errors.
1509 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1510 */
1511#ifndef IEM_WITH_SETJMP
1512# define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
1513 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), sizeof(uint16_t), (a_iSeg), \
1514 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1)); \
1515 a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \
1516 } while (0)
1517#else
1518# define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1519 (a_pu16Mem) = iemMemMapDataU16RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1520#endif
1521
1522/**
1523 * Maps guest memory for word writeonly direct (or bounce) buffer access.
1524 *
1525 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1526 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1527 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1528 * @param[in] a_GCPtrMem The memory address.
1529 * @remarks Will return/long jump on errors.
1530 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1531 */
1532#ifndef IEM_WITH_SETJMP
1533# define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
1534 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), sizeof(uint16_t), (a_iSeg), \
1535 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1)); \
1536 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
1537 } while (0)
1538#else
1539# define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1540 (a_pu16Mem) = iemMemMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1541#endif
1542
1543/**
1544 * Maps guest memory for word readonly direct (or bounce) buffer access.
1545 *
1546 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1547 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1548 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1549 * @param[in] a_GCPtrMem The memory address.
1550 * @remarks Will return/long jump on errors.
1551 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1552 */
1553#ifndef IEM_WITH_SETJMP
1554# define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
1555 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), sizeof(uint16_t), (a_iSeg), \
1556 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1)); \
1557 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \
1558 } while (0)
1559#else
1560# define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1561 (a_pu16Mem) = iemMemMapDataU16RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1562#endif
1563
1564/**
1565 * Maps guest memory for word read+write direct (or bounce) buffer access, flat
1566 * address variant.
1567 *
1568 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1569 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1570 * @param[in] a_GCPtrMem The memory address.
1571 * @remarks Will return/long jump on errors.
1572 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1573 */
1574#ifndef IEM_WITH_SETJMP
1575# define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) do { \
1576 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), sizeof(uint16_t), UINT8_MAX, \
1577 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1)); \
1578 a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \
1579 } while (0)
1580#else
1581# define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1582 (a_pu16Mem) = iemMemFlatMapDataU16RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1583#endif
1584
1585/**
1586 * Maps guest memory for word writeonly direct (or bounce) buffer access, flat
1587 * address variant.
1588 *
1589 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1590 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1591 * @param[in] a_GCPtrMem The memory address.
1592 * @remarks Will return/long jump on errors.
1593 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1594 */
1595#ifndef IEM_WITH_SETJMP
1596# define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) do { \
1597 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), sizeof(uint16_t), UINT8_MAX, \
1598 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1)); \
1599 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
1600 } while (0)
1601#else
1602# define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1603 (a_pu16Mem) = iemMemFlatMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1604#endif
1605
1606/**
1607 * Maps guest memory for word readonly direct (or bounce) buffer access, flat
1608 * address variant.
1609 *
1610 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1611 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1612 * @param[in] a_GCPtrMem The memory address.
1613 * @remarks Will return/long jump on errors.
1614 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1615 */
1616#ifndef IEM_WITH_SETJMP
1617# define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) do { \
1618 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), sizeof(uint16_t), UINT8_MAX, \
1619 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1)); \
1620 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \
1621 } while (0)
1622#else
1623# define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1624 (a_pu16Mem) = iemMemFlatMapDataU16RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1625#endif
1626
1627
1628/* 32-bit */
1629
1630/**
1631 * Maps guest memory for dword read+write direct (or bounce) buffer access.
1632 *
1633 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1634 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1635 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1636 * @param[in] a_GCPtrMem The memory address.
1637 * @remarks Will return/long jump on errors.
1638 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1639 */
1640#ifndef IEM_WITH_SETJMP
1641# define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
1642 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), sizeof(uint32_t), (a_iSeg), \
1643 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1)); \
1644 a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \
1645 } while (0)
1646#else
1647# define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1648 (a_pu32Mem) = iemMemMapDataU32RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1649#endif
1650
1651/**
1652 * Maps guest memory for dword writeonly direct (or bounce) buffer access.
1653 *
1654 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1655 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1656 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1657 * @param[in] a_GCPtrMem The memory address.
1658 * @remarks Will return/long jump on errors.
1659 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1660 */
1661#ifndef IEM_WITH_SETJMP
1662# define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
1663 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), sizeof(uint32_t), (a_iSeg), \
1664 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1)); \
1665 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
1666 } while (0)
1667#else
1668# define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1669 (a_pu32Mem) = iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1670#endif
1671
1672/**
1673 * Maps guest memory for dword readonly direct (or bounce) buffer access.
1674 *
1675 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1676 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1677 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1678 * @param[in] a_GCPtrMem The memory address.
1679 * @remarks Will return/long jump on errors.
1680 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1681 */
1682#ifndef IEM_WITH_SETJMP
1683# define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
1684 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), sizeof(uint32_t), (a_iSeg), \
1685 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1)); \
1686 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \
1687 } while (0)
1688#else
1689# define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1690 (a_pu32Mem) = iemMemMapDataU32RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1691#endif
1692
1693/**
1694 * Maps guest memory for dword read+write direct (or bounce) buffer access,
1695 * flat address variant.
1696 *
1697 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1698 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1699 * @param[in] a_GCPtrMem The memory address.
1700 * @remarks Will return/long jump on errors.
1701 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1702 */
1703#ifndef IEM_WITH_SETJMP
1704# define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) do { \
1705 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), sizeof(uint32_t), UINT8_MAX, \
1706 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1)); \
1707 a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \
1708 } while (0)
1709#else
1710# define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
1711 (a_pu32Mem) = iemMemFlatMapDataU32RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1712#endif
1713
1714/**
1715 * Maps guest memory for dword writeonly direct (or bounce) buffer access, flat
1716 * address variant.
1717 *
1718 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1719 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1720 * @param[in] a_GCPtrMem The memory address.
1721 * @remarks Will return/long jump on errors.
1722 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1723 */
1724#ifndef IEM_WITH_SETJMP
1725# define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) do { \
1726 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), sizeof(uint32_t), UINT8_MAX, \
1727 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1)); \
1728 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
1729 } while (0)
1730#else
1731# define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
1732 (a_pu32Mem) = iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1733#endif
1734
1735/**
1736 * Maps guest memory for dword readonly direct (or bounce) buffer access, flat
1737 * address variant.
1738 *
1739 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1740 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1741 * @param[in] a_GCPtrMem The memory address.
1742 * @remarks Will return/long jump on errors.
1743 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1744 */
1745#ifndef IEM_WITH_SETJMP
1746# define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) do { \
1747 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), sizeof(uint32_t), UINT8_MAX, \
1748 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1)); \
1749 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \
1750 } while (0)
1751#else
1752# define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
1753 (a_pu32Mem) = iemMemFlatMapDataU32RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1754#endif
1755
1756
1757/* 64-bit */
1758
1759/**
1760 * Maps guest memory for qword read+write direct (or bounce) buffer access.
1761 *
1762 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
1763 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1764 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1765 * @param[in] a_GCPtrMem The memory address.
1766 * @remarks Will return/long jump on errors.
1767 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1768 */
1769#ifndef IEM_WITH_SETJMP
1770# define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
1771 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), sizeof(uint64_t), (a_iSeg), \
1772 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1)); \
1773 a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \
1774 } while (0)
1775#else
1776# define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1777 (a_pu64Mem) = iemMemMapDataU64RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1778#endif
1779
1780/**
1781 * Maps guest memory for qword writeonly direct (or bounce) buffer access.
1782 *
1783 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
1784 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1785 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1786 * @param[in] a_GCPtrMem The memory address.
1787 * @remarks Will return/long jump on errors.
1788 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1789 */
1790#ifndef IEM_WITH_SETJMP
1791# define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
1792 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), sizeof(uint64_t), (a_iSeg), \
1793 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)); \
1794 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
1795 } while (0)
1796#else
1797# define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1798 (a_pu64Mem) = iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1799#endif
1800
1801/**
1802 * Maps guest memory for qword readonly direct (or bounce) buffer access.
1803 *
1804 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
1805 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1806 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1807 * @param[in] a_GCPtrMem The memory address.
1808 * @remarks Will return/long jump on errors.
1809 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1810 */
1811#ifndef IEM_WITH_SETJMP
1812# define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
1813 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), sizeof(uint64_t), (a_iSeg), \
1814 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1)); \
1815 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \
1816 } while (0)
1817#else
1818# define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1819 (a_pu64Mem) = iemMemMapDataU64RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1820#endif
1821
1822/**
1823 * Maps guest memory for qword read+write direct (or bounce) buffer access,
1824 * flat address variant.
1825 *
1826 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
1827 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1828 * @param[in] a_GCPtrMem The memory address.
1829 * @remarks Will return/long jump on errors.
1830 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1831 */
1832#ifndef IEM_WITH_SETJMP
1833# define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) do { \
1834 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), sizeof(uint64_t), UINT8_MAX, \
1835 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1)); \
1836 a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \
1837 } while (0)
1838#else
1839# define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
1840 (a_pu64Mem) = iemMemFlatMapDataU64RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1841#endif
1842
1843/**
1844 * Maps guest memory for qword writeonly direct (or bounce) buffer access, flat
1845 * address variant.
1846 *
1847 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
1848 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1849 * @param[in] a_GCPtrMem The memory address.
1850 * @remarks Will return/long jump on errors.
1851 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1852 */
1853#ifndef IEM_WITH_SETJMP
1854# define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) do { \
1855 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), sizeof(uint64_t), UINT8_MAX, \
1856 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)); \
1857 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
1858 } while (0)
1859#else
1860# define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
1861 (a_pu64Mem) = iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1862#endif
1863
1864/**
1865 * Maps guest memory for qword readonly direct (or bounce) buffer access, flat
1866 * address variant.
1867 *
1868 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
1869 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1870 * @param[in] a_GCPtrMem The memory address.
1871 * @remarks Will return/long jump on errors.
1872 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1873 */
1874#ifndef IEM_WITH_SETJMP
1875# define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) do { \
1876 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), sizeof(uint64_t), UINT8_MAX, \
1877 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1)); \
1878 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \
1879 } while (0)
1880#else
1881# define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
1882 (a_pu64Mem) = iemMemFlatMapDataU64RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1883#endif
1884
1885
1886/* commit + unmap */
1887
1888/** Commits the memory and unmaps guest memory previously mapped RW.
1889 * @remarks May return.
1890 */
1891#ifndef IEM_WITH_SETJMP
1892# define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_pvMem, a_bMapInfo) do { \
1893 RT_NOREF_PV(a_bMapInfo); Assert(a_bMapInfo == (1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4)) ); \
1894 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_RW)); \
1895 } while (0)
1896#else
1897# define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_pvMem, a_bMapInfo) \
1898 iemMemCommitAndUnmapRwJmp(pVCpu, (a_pvMem), (a_bMapInfo))
1899#endif
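/* Illustrative read-modify-write sketch using the typed mapping and commit
 * macros above (hypothetical instruction body; the locals, GCPtrEffDst and the
 * inline OR stand in for whatever worker the real code would call):
 *
 *     uint8_t   bUnmapInfo;
 *     uint16_t *pu16Dst;
 *     IEM_MC_MEM_MAP_U16_RW(pu16Dst, bUnmapInfo, X86_SREG_DS, GCPtrEffDst);
 *     *pu16Dst |= u16Src;
 *     IEM_MC_MEM_COMMIT_AND_UNMAP_RW(pu16Dst, bUnmapInfo);
 */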
1900
1901/** Commits the memory and unmaps guest memory previously mapped W.
1902 * @remarks May return.
1903 */
1904#ifndef IEM_WITH_SETJMP
1905# define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_pvMem, a_bMapInfo) do { \
1906 RT_NOREF_PV(a_bMapInfo); Assert(a_bMapInfo == (1 | (IEM_ACCESS_TYPE_WRITE << 4)) ); \
1907 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W)); \
1908 } while (0)
1909#else
1910# define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_pvMem, a_bMapInfo) \
1911 iemMemCommitAndUnmapWoJmp(pVCpu, (a_pvMem), (a_bMapInfo))
1912#endif
1913
1914/** Commits the memory and unmaps guest memory previously mapped R.
1915 * @remarks May return.
1916 */
1917#ifndef IEM_WITH_SETJMP
1918# define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_pvMem, a_bMapInfo) do { \
1919 RT_NOREF_PV(a_bMapInfo); Assert(a_bMapInfo == (1 | (IEM_ACCESS_TYPE_READ << 4)) ); \
1920 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (void *)(a_pvMem), IEM_ACCESS_DATA_R)); \
1921 } while (0)
1922#else
1923# define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_pvMem, a_bMapInfo) \
1924 iemMemCommitAndUnmapRoJmp(pVCpu, (a_pvMem), (a_bMapInfo))
1925#endif
1926
1927
1928/** Commits the memory and unmaps the guest memory, unless the FPU status word
1929 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
1930 * would cause the FPU store not to take place.
1931 *
1932 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
1933 * store, while \#P will not.
1934 *
1935 * @remarks May in theory return - for now.
1936 */
1937#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
1938 do { \
1939 if ( !(a_u16FSW & X86_FSW_ES) \
1940 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
1941 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
1942 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
1943 } while (0)
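/* Note on the condition above: the store is committed unless X86_FSW_ES is set
 * and at least one of the #U/#O/#I bits in the status word is both pending and
 * unmasked in the control word. The FSW exception flags and the FCW mask bits
 * occupy the same bit positions, which is what makes the bitwise test work. */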
1944
1945
1946
1947/** Calculate effective address from R/M. */
1948#ifndef IEM_WITH_SETJMP
1949# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
1950 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &(a_GCPtrEff)))
1951#else
1952# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
1953 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (a_bRm), (a_cbImmAndRspOffset)))
1954#endif
1955
1956#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
1957#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
1958#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
1959#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
1960#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
1961#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
1962#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
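/* Illustrative sketch: the AIMPL macros simply forward to an assembly/C worker
 * function pointer, e.g. (the worker name and arguments below are made up for
 * illustration and do not appear in this file):
 *
 *     IEM_MC_CALL_VOID_AIMPL_3(pfnMadeUpWorkerU16, pu16Dst, u16Src, pEFlags);
 */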
1963
1964/** @name IEM_CIMPL_F_XXX - State change clues for CIMPL calls.
1965 *
1966 * These clues are mainly for the recompiler, so that it can emit correct code.
1967 *
1968 * They are processed by the Python script, which also automatically
1969 * calculates flags for MC blocks based on the statements, extending the use of
1970 * these flags to describe MC block behavior to the recompiler core. The Python
1971 * script passes the flags to the IEM_MC2_END_EMIT_CALLS macro, but mainly for
1972 * error checking purposes. The script emits the necessary fEndTb = true and
1973 * similar statements, as this reduces compile time a tiny bit.
1974 *
1975 * @{ */
1976/** Flag set if direct branch, clear if absolute or indirect. */
1977#define IEM_CIMPL_F_BRANCH_DIRECT RT_BIT_32(0)
1978/** Flag set if indirect branch, clear if direct or relative.
1979 * This is also used for all system control transfers (SYSCALL, SYSRET, INT, ++)
1980 * as well as for return instructions (RET, IRET, RETF). */
1981#define IEM_CIMPL_F_BRANCH_INDIRECT RT_BIT_32(1)
1982/** Flag set if relative branch, clear if absolute or indirect. */
1983#define IEM_CIMPL_F_BRANCH_RELATIVE RT_BIT_32(2)
1984/** Flag set if conditional branch, clear if unconditional. */
1985#define IEM_CIMPL_F_BRANCH_CONDITIONAL RT_BIT_32(3)
1986/** Flag set if it's a far branch (changes CS). */
1987#define IEM_CIMPL_F_BRANCH_FAR RT_BIT_32(4)
1988/** Convenience: Testing any kind of branch. */
1989#define IEM_CIMPL_F_BRANCH_ANY (IEM_CIMPL_F_BRANCH_DIRECT | IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_RELATIVE)
1990
1991/** Execution flags may change (IEMCPU::fExec). */
1992#define IEM_CIMPL_F_MODE RT_BIT_32(5)
1993/** May change significant portions of RFLAGS. */
1994#define IEM_CIMPL_F_RFLAGS RT_BIT_32(6)
1995/** May change the status bits (X86_EFL_STATUS_BITS) in RFLAGS. */
1996#define IEM_CIMPL_F_STATUS_FLAGS RT_BIT_32(7)
1997/** May enable interrupts, so recheck IRQ immediately after executing
1998 * the instruction. */
1999#define IEM_CIMPL_F_CHECK_IRQ_AFTER RT_BIT_32(8)
2000/** May disable interrupts, so recheck IRQ immediately before executing the
2001 * instruction. */
2002#define IEM_CIMPL_F_CHECK_IRQ_BEFORE RT_BIT_32(9)
2003/** Convenience: Check for IRQ both before and after an instruction. */
2004#define IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER (IEM_CIMPL_F_CHECK_IRQ_BEFORE | IEM_CIMPL_F_CHECK_IRQ_AFTER)
2005/** May trigger a VM exit (treated like IEM_CIMPL_F_MODE atm). */
2006#define IEM_CIMPL_F_VMEXIT RT_BIT_32(10)
2007/** May modify FPU state.
2008 * @todo Not sure if this is useful yet. */
2009#define IEM_CIMPL_F_FPU RT_BIT_32(11)
2010/** REP-prefixed instruction which may yield before updating PC.
2011 * @todo Not sure if this is useful; REP functions now return a non-zero
2012 * status if they don't update the PC.
2013#define IEM_CIMPL_F_REP RT_BIT_32(12)
2014/** I/O instruction.
2015 * @todo Not sure if this is useful yet. */
2016#define IEM_CIMPL_F_IO RT_BIT_32(13)
2017/** Force end of TB after the instruction. */
2018#define IEM_CIMPL_F_END_TB RT_BIT_32(14)
2019/** Convenience: Raise exception (technically unnecessary, since it shouldn't return VINF_SUCCESS). */
2020#define IEM_CIMPL_F_XCPT \
2021 (IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT)
2022/** @} */
2023
2024/** @def IEM_MC_CALL_CIMPL_HLP_RET
2025 * Helper macro for checking that all important IEM_CIMPL_F_XXX bits are set.
2026 */
2027#ifdef VBOX_STRICT
2028#define IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, a_CallExpr) \
2029 do { \
2030 uint8_t const cbInstr = IEM_GET_INSTR_LEN(pVCpu); /* may be flushed */ \
2031 uint16_t const uCsBefore = pVCpu->cpum.GstCtx.cs.Sel; \
2032 uint64_t const uRipBefore = pVCpu->cpum.GstCtx.rip; \
2033 uint32_t const fEflBefore = pVCpu->cpum.GstCtx.eflags.u; \
2034 uint32_t const fExecBefore = pVCpu->iem.s.fExec; \
2035 VBOXSTRICTRC const rcStrictHlp = a_CallExpr; \
2036 if (rcStrictHlp == VINF_SUCCESS) \
2037 { \
2038 AssertMsg( ((a_fFlags) & IEM_CIMPL_F_BRANCH_ANY) \
2039 || ( uRipBefore + cbInstr == pVCpu->cpum.GstCtx.rip \
2040 && uCsBefore == pVCpu->cpum.GstCtx.cs.Sel) \
2041 || ( ((a_fFlags) & IEM_CIMPL_F_REP) \
2042 && uRipBefore == pVCpu->cpum.GstCtx.rip \
2043 && uCsBefore == pVCpu->cpum.GstCtx.cs.Sel), \
2044 ("CS:RIP=%04x:%08RX64 + %x -> %04x:%08RX64, expected %04x:%08RX64\n", uCsBefore, uRipBefore, cbInstr, \
2045 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uCsBefore, uRipBefore + cbInstr)); \
2046 if ((a_fFlags) & IEM_CIMPL_F_RFLAGS) \
2047 { /* No need to check fEflBefore */ Assert(!((a_fFlags) & IEM_CIMPL_F_STATUS_FLAGS)); } \
2048 else if ((a_fFlags) & IEM_CIMPL_F_STATUS_FLAGS) \
2049 AssertMsg( (pVCpu->cpum.GstCtx.eflags.u & ~(X86_EFL_STATUS_BITS | X86_EFL_RF)) \
2050 == (fEflBefore & ~(X86_EFL_STATUS_BITS | X86_EFL_RF)), \
2051 ("EFL=%#RX32 -> %#RX32\n", fEflBefore, pVCpu->cpum.GstCtx.eflags.u)); \
2052 else \
2053 AssertMsg( (pVCpu->cpum.GstCtx.eflags.u & ~(X86_EFL_RF)) \
2054 == (fEflBefore & ~(X86_EFL_RF)), \
2055 ("EFL=%#RX32 -> %#RX32\n", fEflBefore, pVCpu->cpum.GstCtx.eflags.u)); \
2056 if (!((a_fFlags) & IEM_CIMPL_F_MODE)) \
2057 { \
2058 uint32_t fExecRecalc = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS); \
2059 AssertMsg(fExecBefore == fExecRecalc, \
2060 ("fExec=%#x -> %#x (diff %#x)\n", fExecBefore, fExecRecalc, fExecBefore ^ fExecRecalc)); \
2061 } \
2062 } \
2063 return rcStrictHlp; \
2064 } while (0)
2065#else
2066# define IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, a_CallExpr) return a_CallExpr
2067#endif
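/*
 * In strict builds the helper above cross-checks the declared flags against the
 * observed side effects of a call returning VINF_SUCCESS: a CS:RIP change
 * without one of the IEM_CIMPL_F_BRANCH_XXX bits (or IEM_CIMPL_F_REP for a
 * yielding REP worker that left RIP alone), an EFLAGS change not covered by
 * IEM_CIMPL_F_RFLAGS or IEM_CIMPL_F_STATUS_FLAGS, or an IEMCPU::fExec change
 * without IEM_CIMPL_F_MODE will trigger an assertion.
 */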
2068
2069/**
2070 * Defers the rest of the instruction emulation to a C implementation routine
2071 * and returns, only taking the standard parameters.
2072 *
2073 * @param a_fFlags IEM_CIMPL_F_XXX.
2074 * @param a_pfnCImpl The pointer to the C routine.
2075 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
2076 */
2077#define IEM_MC_CALL_CIMPL_0(a_fFlags, a_pfnCImpl) \
2078 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu)))
2079
2080/**
2081 * Defers the rest of instruction emulation to a C implementation routine and
2082 * returns, taking one argument in addition to the standard ones.
2083 *
2084 * @param a_fFlags IEM_CIMPL_F_XXX.
2085 * @param a_pfnCImpl The pointer to the C routine.
2086 * @param a0 The argument.
2087 */
2088#define IEM_MC_CALL_CIMPL_1(a_fFlags, a_pfnCImpl, a0) \
2089 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0))
2090
2091/**
2092 * Defers the rest of the instruction emulation to a C implementation routine
2093 * and returns, taking two arguments in addition to the standard ones.
2094 *
2095 * @param a_fFlags IEM_CIMPL_F_XXX.
2096 * @param a_pfnCImpl The pointer to the C routine.
2097 * @param a0 The first extra argument.
2098 * @param a1 The second extra argument.
2099 */
2100#define IEM_MC_CALL_CIMPL_2(a_fFlags, a_pfnCImpl, a0, a1) \
2101 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1))
2102
2103/**
2104 * Defers the rest of the instruction emulation to a C implementation routine
2105 * and returns, taking three arguments in addition to the standard ones.
2106 *
2107 * @param a_fFlags IEM_CIMPL_F_XXX.
2108 * @param a_pfnCImpl The pointer to the C routine.
2109 * @param a0 The first extra argument.
2110 * @param a1 The second extra argument.
2111 * @param a2 The third extra argument.
2112 */
2113#define IEM_MC_CALL_CIMPL_3(a_fFlags, a_pfnCImpl, a0, a1, a2) \
2114 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2))
2115
2116/**
2117 * Defers the rest of the instruction emulation to a C implementation routine
2118 * and returns, taking four arguments in addition to the standard ones.
2119 *
2120 * @param a_fFlags IEM_CIMPL_F_XXX.
2121 * @param a_pfnCImpl The pointer to the C routine.
2122 * @param a0 The first extra argument.
2123 * @param a1 The second extra argument.
2124 * @param a2 The third extra argument.
2125 * @param a3 The fourth extra argument.
2126 */
2127#define IEM_MC_CALL_CIMPL_4(a_fFlags, a_pfnCImpl, a0, a1, a2, a3) \
2128 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3))
2129
2130/**
2131 * Defers the rest of the instruction emulation to a C implementation routine
2132 * and returns, taking five arguments in addition to the standard ones.
2133 *
2134 * @param a_fFlags IEM_CIMPL_F_XXX.
2135 * @param a_pfnCImpl The pointer to the C routine.
2136 * @param a0 The first extra argument.
2137 * @param a1 The second extra argument.
2138 * @param a2 The third extra argument.
2139 * @param a3 The fourth extra argument.
2140 * @param a4 The fifth extra argument.
2141 */
2142#define IEM_MC_CALL_CIMPL_5(a_fFlags, a_pfnCImpl, a0, a1, a2, a3, a4) \
2143 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4))
2144
2145/**
2146 * Defers the entire instruction emulation to a C implementation routine and
2147 * returns, only taking the standard parameters.
2148 *
2149 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
2150 *
2151 * @param a_fFlags IEM_CIMPL_F_XXX.
2152 * @param a_pfnCImpl The pointer to the C routine.
2153 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
2154 */
2155#define IEM_MC_DEFER_TO_CIMPL_0_RET(a_fFlags, a_pfnCImpl) \
2156 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu)))
2157
2158/**
2159 * Defers the entire instruction emulation to a C implementation routine and
2160 * returns, taking one argument in addition to the standard ones.
2161 *
2162 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
2163 *
2164 * @param a_fFlags IEM_CIMPL_F_XXX.
2165 * @param a_pfnCImpl The pointer to the C routine.
2166 * @param a0 The argument.
2167 */
2168#define IEM_MC_DEFER_TO_CIMPL_1_RET(a_fFlags, a_pfnCImpl, a0) \
2169 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0))
2170
2171/**
2172 * Defers the entire instruction emulation to a C implementation routine and
2173 * returns, taking two arguments in addition to the standard ones.
2174 *
2175 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
2176 *
2177 * @param a_fFlags IEM_CIMPL_F_XXX.
2178 * @param a_pfnCImpl The pointer to the C routine.
2179 * @param a0 The first extra argument.
2180 * @param a1 The second extra argument.
2181 */
2182#define IEM_MC_DEFER_TO_CIMPL_2_RET(a_fFlags, a_pfnCImpl, a0, a1) \
2183 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1))
2184
2185/**
2186 * Defers the entire instruction emulation to a C implementation routine and
2187 * returns, taking three arguments in addition to the standard ones.
2188 *
2189 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
2190 *
2191 * @param a_fFlags IEM_CIMPL_F_XXX.
2192 * @param a_pfnCImpl The pointer to the C routine.
2193 * @param a0 The first extra argument.
2194 * @param a1 The second extra argument.
2195 * @param a2 The third extra argument.
2196 */
2197#define IEM_MC_DEFER_TO_CIMPL_3_RET(a_fFlags, a_pfnCImpl, a0, a1, a2) \
2198 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2))
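/*
 * A minimal sketch of a decoder function handing the whole instruction to a C
 * worker via the defer variant.  FNIEMOP_DEF, IEMOP_MNEMONIC and iemCImpl_hlt
 * come from other IEM source files, and the flag set shown for HLT is an
 * assumption:
 *
 *      FNIEMOP_DEF(iemOp_hlt)
 *      {
 *          IEMOP_MNEMONIC(hlt, "hlt");
 *          IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_END_TB | IEM_CIMPL_F_VMEXIT, iemCImpl_hlt);
 *      }
 */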
2199
2200
2201/**
2202 * Calls a FPU assembly implementation taking one visible argument.
2203 *
2204 * @param a_pfnAImpl Pointer to the assembly FPU routine.
2205 * @param a0 The first extra argument.
2206 */
2207#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
2208 do { \
2209 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0)); \
2210 } while (0)
2211
2212/**
2213 * Calls a FPU assembly implementation taking two visible arguments.
2214 *
2215 * @param a_pfnAImpl Pointer to the assembly FPU routine.
2216 * @param a0 The first extra argument.
2217 * @param a1 The second extra argument.
2218 */
2219#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
2220 do { \
2221 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
2222 } while (0)
2223
2224/**
2225 * Calls a FPU assembly implementation taking three visible arguments.
2226 *
2227 * @param a_pfnAImpl Pointer to the assembly FPU routine.
2228 * @param a0 The first extra argument.
2229 * @param a1 The second extra argument.
2230 * @param a2 The third extra argument.
2231 */
2232#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
2233 do { \
2234 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
2235 } while (0)
2236
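/** Composes an FPU result from a status word and an 80-bit value, for use with
 * the push/store result macros below. */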
2237#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
2238 do { \
2239 (a_FpuData).FSW = (a_FSW); \
2240 (a_FpuData).r80Result = *(a_pr80Value); \
2241 } while (0)
2242
2243/** Pushes FPU result onto the stack. */
2244#define IEM_MC_PUSH_FPU_RESULT(a_FpuData, a_uFpuOpcode) \
2245 iemFpuPushResult(pVCpu, &a_FpuData, a_uFpuOpcode)
2246/** Pushes FPU result onto the stack and sets the FPUDP. */
2247#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2248 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2249
2250/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
2251#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo, a_uFpuOpcode) \
2252 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo, a_uFpuOpcode)
2253
2254/** Stores FPU result in a stack register. */
2255#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg, a_uFpuOpcode) \
2256 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg, a_uFpuOpcode)
2257/** Stores FPU result in a stack register and pops the stack. */
2258#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg, a_uFpuOpcode) \
2259 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg, a_uFpuOpcode)
2260/** Stores FPU result in a stack register and sets the FPUDP. */
2261#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2262 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2263/** Stores FPU result in a stack register, sets the FPUDP, and pops the
2264 * stack. */
2265#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2266 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2267
2268/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
2269#define IEM_MC_UPDATE_FPU_OPCODE_IP(a_uFpuOpcode) \
2270 iemFpuUpdateOpcodeAndIp(pVCpu, a_uFpuOpcode)
2271/** Free a stack register (for FFREE and FFREEP). */
2272#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
2273 iemFpuStackFree(pVCpu, a_iStReg)
2274/** Increment the FPU stack pointer. */
2275#define IEM_MC_FPU_STACK_INC_TOP() \
2276 iemFpuStackIncTop(pVCpu)
2277/** Decrement the FPU stack pointer. */
2278#define IEM_MC_FPU_STACK_DEC_TOP() \
2279 iemFpuStackDecTop(pVCpu)
2280
2281/** Updates the FSW, FOP, FPUIP, and FPUCS. */
2282#define IEM_MC_UPDATE_FSW(a_u16FSW, a_uFpuOpcode) \
2283 iemFpuUpdateFSW(pVCpu, a_u16FSW, a_uFpuOpcode)
2284/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
2285#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW, a_uFpuOpcode) \
2286 iemFpuUpdateFSW(pVCpu, a_u16FSW, a_uFpuOpcode)
2287/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
2288#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2289 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2290/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
2291#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW, a_uFpuOpcode) \
2292 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW, a_uFpuOpcode)
2293/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
2294 * stack. */
2295#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2296 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2297/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
2298#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW, a_uFpuOpcode) \
2299 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW, a_uFpuOpcode)
2300
2301/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
2302#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst, a_uFpuOpcode) \
2303 iemFpuStackUnderflow(pVCpu, a_iStDst, a_uFpuOpcode)
2304/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
2305 * stack. */
2306#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst, a_uFpuOpcode) \
2307 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst, a_uFpuOpcode)
2308/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
2309 * FPUDS. */
2310#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2311 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2312/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
2313 * FPUDS. Pops stack. */
2314#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2315 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2316/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
2317 * stack twice. */
2318#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP(a_uFpuOpcode) \
2319 iemFpuStackUnderflowThenPopPop(pVCpu, a_uFpuOpcode)
2320/** Raises a FPU stack underflow exception for an instruction pushing a result
2321 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
2322#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW(a_uFpuOpcode) \
2323 iemFpuStackPushUnderflow(pVCpu, a_uFpuOpcode)
2324/** Raises a FPU stack underflow exception for an instruction pushing a result
2325 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
2326#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO(a_uFpuOpcode) \
2327 iemFpuStackPushUnderflowTwo(pVCpu, a_uFpuOpcode)
2328
2329/** Raises a FPU stack overflow exception as part of a push attempt. Sets
2330 * FPUIP, FPUCS and FOP. */
2331#define IEM_MC_FPU_STACK_PUSH_OVERFLOW(a_uFpuOpcode) \
2332 iemFpuStackPushOverflow(pVCpu, a_uFpuOpcode)
2333/** Raises a FPU stack overflow exception as part of a push attempt. Sets
2334 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
2335#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2336 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2337/** Prepares for using the FPU state.
2338 * Ensures that we can use the host FPU in the current context (RC+R0).
2339 * Ensures the guest FPU state in the CPUMCTX is up to date. */
2340#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
2341/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
2342#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
2343/** Actualizes the guest FPU state so it can be accessed and modified. */
2344#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
2345
2346/** Stores SSE SIMD result updating MXCSR. */
2347#define IEM_MC_STORE_SSE_RESULT(a_SseData, a_iXmmReg) \
2348 iemSseStoreResult(pVCpu, &a_SseData, a_iXmmReg)
2349/** Updates MXCSR. */
2350#define IEM_MC_SSE_UPDATE_MXCSR(a_fMxcsr) \
2351 iemSseUpdateMxcsr(pVCpu, a_fMxcsr)
2352
2353/** Prepares for using the SSE state.
2354 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
2355 * Ensures the guest SSE state in the CPUMCTX is up to date. */
2356#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
2357/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
2358#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
2359/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
2360#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
2361
2362/** Prepares for using the AVX state.
2363 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
2364 * Ensures the guest AVX state in the CPUMCTX is up to date.
2365 * @note This will include the AVX512 state too when support for it is added
2366 * due to the zero-extending feature of VEX instructions.
2367#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
2368/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
2369#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
2370/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
2371#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
2372
2373/**
2374 * Calls a MMX assembly implementation taking two visible arguments.
2375 *
2376 * @param a_pfnAImpl Pointer to the assembly MMX routine.
2377 * @param a0 The first extra argument.
2378 * @param a1 The second extra argument.
2379 */
2380#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
2381 do { \
2382 IEM_MC_PREPARE_FPU_USAGE(); \
2383 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
2384 } while (0)
2385
2386/**
2387 * Calls a MMX assembly implementation taking three visible arguments.
2388 *
2389 * @param a_pfnAImpl Pointer to the assembly MMX routine.
2390 * @param a0 The first extra argument.
2391 * @param a1 The second extra argument.
2392 * @param a2 The third extra argument.
2393 */
2394#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
2395 do { \
2396 IEM_MC_PREPARE_FPU_USAGE(); \
2397 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
2398 } while (0)
2399
2400
2401/**
2402 * Calls a SSE assembly implementation taking two visible arguments.
2403 *
2404 * @param a_pfnAImpl Pointer to the assembly SSE routine.
2405 * @param a0 The first extra argument.
2406 * @param a1 The second extra argument.
2407 */
2408#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
2409 do { \
2410 IEM_MC_PREPARE_SSE_USAGE(); \
2411 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
2412 } while (0)
2413
2414/**
2415 * Calls a SSE assembly implementation taking three visible arguments.
2416 *
2417 * @param a_pfnAImpl Pointer to the assembly SSE routine.
2418 * @param a0 The first extra argument.
2419 * @param a1 The second extra argument.
2420 * @param a2 The third extra argument.
2421 */
2422#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
2423 do { \
2424 IEM_MC_PREPARE_SSE_USAGE(); \
2425 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
2426 } while (0)
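/*
 * A minimal sketch of a register-to-register SSE binary operation using the
 * call macro above.  The argument and reference macros (IEM_MC_ARG,
 * IEM_MC_REF_XREG_U128, IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT), the ModR/M
 * helpers and the worker name are assumed from elsewhere in the IEM sources:
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(PRTUINT128U,  puDst, 0);
 *      IEM_MC_ARG(PCRTUINT128U, puSrc, 1);
 *      IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
 *      IEM_MC_REF_XREG_U128(puDst, IEM_GET_MODRM_REG(pVCpu, bRm));
 *      IEM_MC_REF_XREG_U128_CONST(puSrc, IEM_GET_MODRM_RM(pVCpu, bRm));
 *      IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pand_u128, puDst, puSrc);
 *      IEM_MC_ADVANCE_RIP_AND_FINISH();
 *      IEM_MC_END();
 */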
2427
2428
2429/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
2430 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
2431#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
2432 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, &pVCpu->cpum.GstCtx.XState, 0)
2433
2434/**
2435 * Calls an AVX assembly implementation taking two visible arguments.
2436 *
2437 * There is one implicit zero'th argument, a pointer to the extended state.
2438 *
2439 * @param a_pfnAImpl Pointer to the assembly AVX routine.
2440 * @param a1 The first extra argument.
2441 * @param a2 The second extra argument.
2442 */
2443#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
2444 do { \
2445 IEM_MC_PREPARE_AVX_USAGE(); \
2446 a_pfnAImpl(pXState, (a1), (a2)); \
2447 } while (0)
2448
2449/**
2450 * Calls an AVX assembly implementation taking three visible arguments.
2451 *
2452 * There is one implicit zero'th argument, a pointer to the extended state.
2453 *
2454 * @param a_pfnAImpl Pointer to the assembly AVX routine.
2455 * @param a1 The first extra argument.
2456 * @param a2 The second extra argument.
2457 * @param a3 The third extra argument.
2458 */
2459#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
2460 do { \
2461 IEM_MC_PREPARE_AVX_USAGE(); \
2462 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
2463 } while (0)
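/*
 * A minimal sketch of how the implicit-state AVX call macros fit together.
 * Only the implicit zero'th pXState argument is fixed by the macros above;
 * the worker name, the register index values and the exception-check macro
 * are placeholders/assumptions borrowed from the wider IEM sources:
 *
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();                       // declares argument 0: pXState
 *      IEM_MC_ARG_CONST(uint8_t, iYRegDst, iYRegDstValue, 1);  // iYRegDstValue: decoder-supplied index
 *      IEM_MC_ARG_CONST(uint8_t, iYRegSrc, iYRegSrcValue, 2);  // iYRegSrcValue: decoder-supplied index
 *      IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
 *      IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_vexExampleWorker_u256, iYRegDst, iYRegSrc);
 *      IEM_MC_ADVANCE_RIP_AND_FINISH();
 *      IEM_MC_END();
 */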
2464
2465/** @note Not for IOPL or IF testing. */
2466#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
2467/** @note Not for IOPL or IF testing. */
2468#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
2469/** @note Not for IOPL or IF testing. */
2470#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
2471/** @note Not for IOPL or IF testing. */
2472#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
2473/** @note Not for IOPL or IF testing. */
2474#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
2475 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
2476 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
2477/** @note Not for IOPL or IF testing. */
2478#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
2479 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
2480 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
2481/** @note Not for IOPL or IF testing. */
2482#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
2483 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
2484 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
2485 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
2486/** @note Not for IOPL or IF testing. */
2487#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
2488 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
2489 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
2490 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
2491#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
2492#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
2493#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
2494/** @note Not for IOPL or IF testing. */
2495#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
2496 if ( pVCpu->cpum.GstCtx.cx != 0 \
2497 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
2498/** @note Not for IOPL or IF testing. */
2499#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
2500 if ( pVCpu->cpum.GstCtx.ecx != 0 \
2501 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
2502/** @note Not for IOPL or IF testing. */
2503#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
2504 if ( pVCpu->cpum.GstCtx.rcx != 0 \
2505 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
2506/** @note Not for IOPL or IF testing. */
2507#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
2508 if ( pVCpu->cpum.GstCtx.cx != 0 \
2509 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
2510/** @note Not for IOPL or IF testing. */
2511#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
2512 if ( pVCpu->cpum.GstCtx.ecx != 0 \
2513 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
2514/** @note Not for IOPL or IF testing. */
2515#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
2516 if ( pVCpu->cpum.GstCtx.rcx != 0 \
2517 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
2518#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
2519#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
2520
2521#define IEM_MC_REF_FPUREG(a_pr80Dst, a_iSt) \
2522 do { (a_pr80Dst) = &pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80; } while (0)
2523#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
2524 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
2525#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
2526 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
2529#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
2530 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
2531#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
2532 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
2533#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
2534 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
2535#define IEM_MC_IF_FCW_IM() \
2536 if (pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_IM) {
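/** Checks whether MXCSR has any exception flag set whose corresponding
 * exception mask bit is clear, i.e. whether an unmasked SIMD FP exception
 * is pending. */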
2537#define IEM_MC_IF_MXCSR_XCPT_PENDING() \
2538 if (( ~((pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
2539 & (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_FLAGS)) != 0) {
2540
2541#define IEM_MC_ELSE() } else {
2542#define IEM_MC_ENDIF() } do {} while (0)
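/*
 * A minimal sketch tying the conditional macros together with the FPU call and
 * result macros above, in the shape of an ST0 := ST0 op STi style instruction.
 * The local/argument declaration macros, the device-not-available check, the
 * worker name and the uFpuOpcode/iStReg values are assumptions or placeholders
 * taken from the surrounding IEM decoder context:
 *
 *      IEM_MC_BEGIN(3, 1);
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,      1);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,      2);
 *      IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *          IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0, uFpuOpcode);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0, uFpuOpcode);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP_AND_FINISH();
 *      IEM_MC_END();
 */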
2543
2544/** @} */
2545
2546#endif /* !VMM_INCLUDED_SRC_include_IEMMc_h */
2547