VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMMc.h@108296

Last change on this file since 108296 was 108296, checked in by vboxsync, 3 months ago

VMM/IEM: s/IEM_MC_STORE_MEM_(?!FLAT|SEG|BY)/IEM_MC_STORE_MEM_SEG_\2/g jiraref:VBP-1531

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 149.9 KB
1/* $Id: IEMMc.h 108296 2025-02-19 14:44:11Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - IEM_MC_XXX.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMMc_h
29#define VMM_INCLUDED_SRC_include_IEMMc_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34
35/** @name "Microcode" macros.
36 *
37 * The idea is that we should be able to use the same code both to interpret
38 * instructions and to recompile them, hence this obfuscation.
39 *
40 * @{
41 */
42
43#define IEM_MC_BEGIN(a_fMcFlags, a_fCImplFlags) {
44#define IEM_MC_END() }
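/* A minimal usage sketch of how an instruction body strings IEM_MC_XXX
 * statements together between IEM_MC_BEGIN/IEM_MC_END.  The zero flag
 * arguments and the X86_GREG_xAX/X86_GREG_xCX indices are illustrative
 * assumptions only, not taken from a real instruction template. */
#if 0
        IEM_MC_BEGIN(0, 0);
            IEM_MC_LOCAL(uint16_t, u16Value);
            IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
            IEM_MC_STORE_GREG_U16(X86_GREG_xCX, u16Value);
            IEM_MC_ADVANCE_PC_AND_FINISH();
        IEM_MC_END();
#endif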
45
46/** Internal macro. */
47#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
48 do \
49 { \
50 VBOXSTRICTRC rcStrict2 = a_Expr; \
51 if (rcStrict2 == VINF_SUCCESS) \
52 { /* likely */ } \
53 else \
54 return rcStrict2; \
55 } while (0)
56
57
58/** Dummy MC that prevents native recompilation. */
59#define IEM_MC_NO_NATIVE_RECOMPILE() ((void)0)
60
61/** Advances RIP, finishes the instruction and returns.
62 * This may include raising debug exceptions and such. */
63#define IEM_MC_ADVANCE_PC_AND_FINISH() return iemRegAddToRipAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
64
65
66/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
67#define IEM_MC_REL_JMP_S8_AND_FINISH(a_i8) \
68 return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i8), pVCpu->iem.s.enmEffOpSize)
69/** Sets RIP (may trigger \#GP), finishes the instruction and returns.
70 * @note only usable in 16-bit op size mode. */
71#define IEM_MC_REL_JMP_S16_AND_FINISH(a_i16) \
72 return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i16))
73/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
74#define IEM_MC_REL_JMP_S32_AND_FINISH(a_i32) \
75 return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i32), pVCpu->iem.s.enmEffOpSize)
76/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
77#define IEM_MC_IND_JMP_U16_AND_FINISH(a_u16NewIP) \
78 return iemRegRipJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP), IEM_GET_INSTR_LEN(pVCpu))
79/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
80#define IEM_MC_IND_JMP_U32_AND_FINISH(a_u32NewIP) \
81 return iemRegRipJumpU32AndFinishClearingRF((pVCpu), (a_u32NewIP), IEM_GET_INSTR_LEN(pVCpu))
82/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
83#define IEM_MC_IND_JMP_U64_AND_FINISH(a_u64NewIP) \
84 return iemRegRipJumpU64AndFinishClearingRF((pVCpu), (a_u64NewIP), IEM_GET_INSTR_LEN(pVCpu))
85
86/** Sets RIP (may trigger \#GP), finishes the instruction and returns.
87 * @note only usable in 16-bit op size mode. */
88#define IEM_MC_REL_CALL_S16_AND_FINISH(a_i16) \
89 return iemRegRipRelativeCallS16AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i16))
90/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
91#define IEM_MC_REL_CALL_S32_AND_FINISH(a_i32) \
92 return iemRegEip32RelativeCallS32AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i32))
93/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
94#define IEM_MC_REL_CALL_S64_AND_FINISH(a_i64) \
95 return iemRegRip64RelativeCallS64AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i64))
96/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
97#define IEM_MC_IND_CALL_U16_AND_FINISH(a_u16NewIP) \
98 return iemRegIp16IndirectCallU16AndFinishClearingRF((pVCpu), IEM_GET_INSTR_LEN(pVCpu), (a_u16NewIP))
99/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
100#define IEM_MC_IND_CALL_U32_AND_FINISH(a_u32NewIP) \
101 return iemRegEip32IndirectCallU32AndFinishClearingRF((pVCpu), IEM_GET_INSTR_LEN(pVCpu), (a_u32NewIP))
102/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
103#define IEM_MC_IND_CALL_U64_AND_FINISH(a_u64NewIP) \
104 return iemRegRip64IndirectCallU64AndFinishClearingRF((pVCpu), IEM_GET_INSTR_LEN(pVCpu), (a_u64NewIP))
105
106
107/** Fetches the near return address from the stack, sets RIP and RSP (may trigger
108 * \#GP or \#SS), finishes the instruction and returns. */
109#define IEM_MC_RETN_AND_FINISH(a_cbPopArgs) \
110 return iemRegRipNearReturnAndFinishClearingRF((pVCpu), IEM_GET_INSTR_LEN(pVCpu), (a_cbPopArgs), pVCpu->iem.s.enmEffOpSize)
111
112
113#define IEM_MC_RAISE_DIVIDE_ERROR_IF_LOCAL_IS_ZERO(a_uVar) \
114 do { \
115 if (RT_LIKELY((a_uVar) != 0)) \
116 { /* probable */ } \
117 else return iemRaiseDivideError(pVCpu); \
118 } while (0)
119#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
120 do { \
121 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)))) \
122 { /* probable */ } \
123 else return iemRaiseDeviceNotAvailable(pVCpu); \
124 } while (0)
125#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
126 do { \
127 if (RT_LIKELY(!((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)))) \
128 { /* probable */ } \
129 else return iemRaiseDeviceNotAvailable(pVCpu); \
130 } while (0)
131#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
132 do { \
133 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES))) \
134 { /* probable */ } \
135 else return iemRaiseMathFault(pVCpu); \
136 } while (0)
137#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
138 do { \
139 /* Since none of the bits we compare from XCR0, CR4 and CR0 overlap, it can \
140 be reduced to a single compare branch in the more probable code path. */ \
141 if (RT_LIKELY( ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) \
142 | (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
143 | (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)) \
144 == (XSAVE_C_YMM | XSAVE_C_SSE | X86_CR4_OSXSAVE))) \
145 { /* probable */ } \
146 else if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
147 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)) \
148 return iemRaiseUndefinedOpcode(pVCpu); \
149 else \
150 return iemRaiseDeviceNotAvailable(pVCpu); \
151 } while (0)
152AssertCompile(!((XSAVE_C_YMM | XSAVE_C_SSE) & X86_CR4_OSXSAVE));
153AssertCompile(!((XSAVE_C_YMM | XSAVE_C_SSE) & X86_CR0_TS));
154AssertCompile(!(X86_CR4_OSXSAVE & X86_CR0_TS));
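/* The AssertCompile statements above back the comment in
 * IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT: because the tested XCR0, CR4 and CR0
 * bits occupy distinct positions, OR'ing the masked values and doing one
 * compare is equivalent to testing each register separately on the fast path.
 * Sketch with hypothetical locals uXcr0/uCr4/uCr0: */
#if 0
    uint64_t const fCombined = (uXcr0 & (XSAVE_C_YMM | XSAVE_C_SSE))
                             | (uCr4  & X86_CR4_OSXSAVE)
                             | (uCr0  & X86_CR0_TS);
    /* fCombined equals XSAVE_C_YMM | XSAVE_C_SSE | X86_CR4_OSXSAVE exactly when
       both XSAVE bits and CR4.OSXSAVE are set and CR0.TS is clear. */
#endif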
155#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
156 do { \
157 /* Since the CR4 and CR0 bits don't overlap, it can be reduced to a
158 single compare branch in the more probable code path. */ \
159 if (RT_LIKELY( ( (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
160 | (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR)) \
161 == X86_CR4_OSFXSR)) \
162 { /* likely */ } \
163 else if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
164 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR)) \
165 return iemRaiseUndefinedOpcode(pVCpu); \
166 else \
167 return iemRaiseDeviceNotAvailable(pVCpu); \
168 } while (0)
169AssertCompile(!((X86_CR0_EM | X86_CR0_TS) & X86_CR4_OSFXSR));
170#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
171 do { \
172 /* Since the two CR0 bits don't overlap with FSW.ES, this can be reduced to a
173 single compare branch in the more probable code path. */ \
174 if (RT_LIKELY(!( (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
175 | (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES)))) \
176 { /* probable */ } \
177 else if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
178 return iemRaiseUndefinedOpcode(pVCpu); \
179 else if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
180 return iemRaiseDeviceNotAvailable(pVCpu); \
181 else \
182 return iemRaiseMathFault(pVCpu); \
183 } while (0)
184AssertCompile(!((X86_CR0_EM | X86_CR0_TS) & X86_FSW_ES));
185/** @todo recomp: this one is slightly problematic as the recompiler doesn't
186 * include the CPL in the TB key. However, it is safe enough for now, as
187 * it calls iemRaiseGeneralProtectionFault0 directly, so no calls will be
188 * emitted for it. */
189#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
190 do { \
191 if (RT_LIKELY(IEM_GET_CPL(pVCpu) == 0)) { /* probable */ } \
192 else return iemRaiseGeneralProtectionFault0(pVCpu); \
193 } while (0)
194#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
195 do { \
196 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
197 else return iemRaiseGeneralProtectionFault0(pVCpu); \
198 } while (0)
199#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
200 do { \
201 if (RT_LIKELY( ((pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE) | IEM_GET_CPU_MODE(pVCpu)) \
202 == (X86_CR4_FSGSBASE | IEMMODE_64BIT))) \
203 { /* probable */ } \
204 else return iemRaiseUndefinedOpcode(pVCpu); \
205 } while (0)
206AssertCompile(X86_CR4_FSGSBASE > UINT8_MAX);
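/* The assertion above ensures X86_CR4_FSGSBASE cannot collide with the small
 * IEMMODE value OR'ed in by IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT, so the single
 * compare checks both that CR4.FSGSBASE is set and that the CPU is in
 * 64-bit mode. */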
207#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
208 do { \
209 if (RT_LIKELY(IEM_IS_CANONICAL(a_u64Addr))) { /* likely */ } \
210 else return iemRaiseGeneralProtectionFault0(pVCpu); \
211 } while (0)
212
213
214#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
215#define IEM_MC_LOCAL_ASSIGN(a_Type, a_Name, a_Value) a_Type a_Name = (a_Value)
216#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
217#define IEM_MC_NOREF(a_Name) RT_NOREF_PV(a_Name) /* NOP/liveness hack */
218#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
219#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
220#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
221/** @note IEMAllInstPython.py duplicates the expansion. */
222#define IEM_MC_ARG_EFLAGS(a_Name, a_iArg) uint32_t const a_Name = pVCpu->cpum.GstCtx.eflags.u
223/** @note IEMAllInstPython.py duplicates the expansion. */
224#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
225 uint32_t a_Name = pVCpu->cpum.GstCtx.eflags.u; \
226 uint32_t *a_pName = &a_Name
227/** @note IEMAllInstPython.py duplicates the expansion. */
228#define IEM_MC_LOCAL_EFLAGS(a_Name) uint32_t a_Name = pVCpu->cpum.GstCtx.eflags.u
229#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
230 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
231#define IEM_MC_COMMIT_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput) do { \
232 AssertMsg((pVCpu->cpum.GstCtx.eflags.u & ~(a_fEflOutput)) == ((a_EFlags) & ~(a_fEflOutput)), \
233 ("eflags.u=%#x (%#x) vs %s=%#x (%#x) - diff %#x (a_fEflOutput=%#x)\n", \
234 pVCpu->cpum.GstCtx.eflags.u & ~(a_fEflOutput), pVCpu->cpum.GstCtx.eflags.u, #a_EFlags, \
235 (a_EFlags) & ~(a_fEflOutput), (a_EFlags), \
236 (pVCpu->cpum.GstCtx.eflags.u & ~(a_fEflOutput)) ^ ((a_EFlags) & ~(a_fEflOutput)), a_fEflOutput)); \
237 pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); \
238 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); \
239 } while (0)
240#define IEM_MC_COMMIT_EFLAGS_OPT(a_EFlags) IEM_MC_COMMIT_EFLAGS(a_EFlags)
241#define IEM_MC_COMMIT_EFLAGS_OPT_EX(a_EFlags, a_fEflInput, a_fEflOutput) IEM_MC_COMMIT_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput)
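/* Note: X86_EFL_1 is the reserved, always-one bit 1 of EFLAGS; the assertions
 * in the commit macros above catch any commit that would clear it, which
 * would indicate a corrupted flags value. */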
242
243/** ASSUMES the source variable not used after this statement. */
244#define IEM_MC_ASSIGN_TO_SMALLER(a_VarDst, a_VarSrcEol) (a_VarDst) = (a_VarSrcEol)
245
246#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
247#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
248#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
249#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
250#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
251#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
252#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
253#define IEM_MC_FETCH_GREG_I16(a_i16Dst, a_iGReg) (a_i16Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
254#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
255#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
256#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
257#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
258#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
259#define IEM_MC_FETCH_GREG_I32(a_i32Dst, a_iGReg) (a_i32Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
260#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
261#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
262#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
263#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
264#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
265#define IEM_MC_FETCH_GREG_PAIR_U32(a_u64Dst, a_iGRegLo, a_iGRegHi) do { \
266 (a_u64Dst).s.Lo = iemGRegFetchU32(pVCpu, (a_iGRegLo)); \
267 (a_u64Dst).s.Hi = iemGRegFetchU32(pVCpu, (a_iGRegHi)); \
268 } while(0)
269#define IEM_MC_FETCH_GREG_PAIR_U64(a_u128Dst, a_iGRegLo, a_iGRegHi) do { \
270 (a_u128Dst).s.Lo = iemGRegFetchU64(pVCpu, (a_iGRegLo)); \
271 (a_u128Dst).s.Hi = iemGRegFetchU64(pVCpu, (a_iGRegHi)); \
272 } while(0)
273#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
274 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
275 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
276 } while (0)
277#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
278 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
279 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
280 } while (0)
281#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
282 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
283 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
284 } while (0)
285/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
286#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
287 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
288 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
289 } while (0)
290#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
291 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
292 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
293 } while (0)
294/** @note Not for IOPL or IF testing or modification. */
295#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
296#define IEM_MC_FETCH_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput) IEM_MC_FETCH_EFLAGS(a_EFlags)
297#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u /* (only LAHF) */
298#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.XState.x87.FSW
299#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.XState.x87.FCW
300
301#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
302#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
303#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
304#define IEM_MC_STORE_GREG_I32(a_iGReg, a_i32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_i32Value) /* clear high bits. */
305#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
306#define IEM_MC_STORE_GREG_I64(a_iGReg, a_i64Value) *iemGRegRefI64(pVCpu, (a_iGReg)) = (a_i64Value)
307#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
308#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
309#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
310#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
311#define IEM_MC_STORE_GREG_PAIR_U32(a_iGRegLo, a_iGRegHi, a_u64Value) do { \
312 *iemGRegRefU64(pVCpu, (a_iGRegLo)) = (uint32_t)(a_u64Value).s.Lo; \
313 *iemGRegRefU64(pVCpu, (a_iGRegHi)) = (uint32_t)(a_u64Value).s.Hi; \
314 } while(0)
315#define IEM_MC_STORE_GREG_PAIR_U64(a_iGRegLo, a_iGRegHi, a_u128Value) do { \
316 *iemGRegRefU64(pVCpu, (a_iGRegLo)) = (uint64_t)(a_u128Value).s.Lo; \
317 *iemGRegRefU64(pVCpu, (a_iGRegHi)) = (uint64_t)(a_u128Value).s.Hi; \
318 } while(0)
319#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
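/* The U32/I32 stores above deliberately write through the 64-bit register
 * reference so the upper half is cleared, mirroring the x86-64 rule that a
 * 32-bit GPR write zero-extends to 64 bits.  Sketch (X86_GREG_xAX used as an
 * illustrative index): */
#if 0
    IEM_MC_STORE_GREG_U32(X86_GREG_xAX, UINT32_C(0x12345678));
    /* RAX is now 0x0000000012345678 regardless of its previous upper half. */
#endif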
320
321/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
322#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
323 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
324 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
325 } while (0)
326#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
327 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
328 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
329 } while (0)
330#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
331 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
332
333
334#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
335#define IEM_MC_REF_GREG_U8_CONST(a_pu8Dst, a_iGReg) (a_pu8Dst) = (uint8_t const *)iemGRegRefU8( pVCpu, (a_iGReg))
336#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
337#define IEM_MC_REF_GREG_U16_CONST(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t const *)iemGRegRefU16(pVCpu, (a_iGReg))
338/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
339 * Use IEM_MC_CLEAR_HIGH_GREG_U64! */
340#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
341#define IEM_MC_REF_GREG_U32_CONST(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t const *)iemGRegRefU32(pVCpu, (a_iGReg))
342#define IEM_MC_REF_GREG_I32(a_pi32Dst, a_iGReg) (a_pi32Dst) = (int32_t *)iemGRegRefU32(pVCpu, (a_iGReg))
343#define IEM_MC_REF_GREG_I32_CONST(a_pi32Dst, a_iGReg) (a_pi32Dst) = (int32_t const *)iemGRegRefU32(pVCpu, (a_iGReg))
344#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
345#define IEM_MC_REF_GREG_U64_CONST(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t const *)iemGRegRefU64(pVCpu, (a_iGReg))
346#define IEM_MC_REF_GREG_I64(a_pi64Dst, a_iGReg) (a_pi64Dst) = (int64_t *)iemGRegRefU64(pVCpu, (a_iGReg))
347#define IEM_MC_REF_GREG_I64_CONST(a_pi64Dst, a_iGReg) (a_pi64Dst) = (int64_t const *)iemGRegRefU64(pVCpu, (a_iGReg))
348/** @note Not for IOPL or IF testing or modification.
349 * @note Must preserve any undefined bits, see CPUMX86EFLAGS! */
350#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.uBoth
351#define IEM_MC_REF_EFLAGS_EX(a_pEFlags, a_fEflInput, a_fEflOutput) IEM_MC_REF_EFLAGS(a_pEFlags)
352
353#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
354#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
355 do { \
356 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
357 *pu32Reg += (a_u32Value); \
358 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
359 } while (0)
360#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
361
362#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u8Const) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u8Const)
363#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u8Const) \
364 do { \
365 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
366 *pu32Reg -= (a_u8Const); \
367 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
368 } while (0)
369#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u8Const) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u8Const)
370#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
371
372#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
373#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
374#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
375#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
376#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
377#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
378#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
379
380#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
381#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
382#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
383#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
384
385#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
386#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
387#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
388
389#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
390#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
391#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
392
393#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
394#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
395#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
396
397#define IEM_MC_SHR_LOCAL_U8(a_u8Local, a_cShift) do { (a_u8Local) >>= (a_cShift); } while (0)
398
399#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
400#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
401#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
402
403#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
404
405#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
406
407#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
408#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
409#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
410 do { \
411 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
412 *pu32Reg &= (a_u32Value); \
413 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
414 } while (0)
415#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
416
417#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
418#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
419#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
420 do { \
421 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
422 *pu32Reg |= (a_u32Value); \
423 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
424 } while (0)
425#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
426
427#define IEM_MC_BSWAP_LOCAL_U16(a_u16Local) (a_u16Local) = RT_BSWAP_U16((a_u16Local));
428#define IEM_MC_BSWAP_LOCAL_U32(a_u32Local) (a_u32Local) = RT_BSWAP_U32((a_u32Local));
429#define IEM_MC_BSWAP_LOCAL_U64(a_u64Local) (a_u64Local) = RT_BSWAP_U64((a_u64Local));
430
431/** @note Not for IOPL or IF modification. */
432#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
433/** @note Not for IOPL or IF modification. */
434#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
435/** @note Not for IOPL or IF modification. */
436#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
437
438#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.XState.x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
439
440/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
441#define IEM_MC_FPU_TO_MMX_MODE() do { \
442 iemFpuRotateStackSetTop(&pVCpu->cpum.GstCtx.XState.x87, 0); \
443 pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
444 pVCpu->cpum.GstCtx.XState.x87.FTW = 0xff; \
445 } while (0)
446
447/** Switches the FPU state from MMX mode (FSW.TOS=0, FTW=0xffff). */
448#define IEM_MC_FPU_FROM_MMX_MODE() do { \
449 iemFpuRotateStackSetTop(&pVCpu->cpum.GstCtx.XState.x87, 0); \
450 pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
451 pVCpu->cpum.GstCtx.XState.x87.FTW = 0; \
452 } while (0)
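/* Note: the FTW values in the two comments above presumably use the
 * architectural (full) tag-word encoding (0 = all valid, 0xffff = all empty),
 * while XState.x87.FTW stores the abridged FXSAVE form (bit set = valid),
 * which is why the code writes 0xff when entering and 0 when leaving MMX
 * mode. */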
453
454#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
455 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx; } while (0)
456#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg, a_iDWord) \
457 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[a_iDWord]; } while (0)
458#define IEM_MC_FETCH_MREG_U16(a_u16Value, a_iMReg, a_iWord) \
459 do { (a_u16Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au16[a_iWord]; } while (0)
460#define IEM_MC_FETCH_MREG_U8(a_u8Value, a_iMReg, a_iByte) \
461 do { (a_u8Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au8[a_iByte]; } while (0)
462#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
463 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
464 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
465 } while (0)
466#define IEM_MC_STORE_MREG_U32(a_iMReg, a_iDword, a_u32Value) \
467 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[(a_iDword)] = (a_u32Value); \
468 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
469 } while (0)
470#define IEM_MC_STORE_MREG_U16(a_iMReg, a_iWord, a_u16Value) \
471 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au16[(a_iWord)] = (a_u16Value); \
472 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
473 } while (0)
474#define IEM_MC_STORE_MREG_U8(a_iMReg, a_iByte, a_u8Value) \
475 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au8[(a_iByte)] = (a_u8Value); \
476 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
477 } while (0)
478#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
479 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
480 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
481 } while (0)
482#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
483 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
484#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
485 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
486#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
487 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
488#define IEM_MC_MODIFIED_MREG(a_iMReg) \
489 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; } while (0)
490#define IEM_MC_MODIFIED_MREG_BY_REF(a_pu64Dst) \
491 do { ((uint32_t *)(a_pu64Dst))[2] = 0xffff; } while (0)
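/* Note: au32[2] of the 128-bit register slot covers bits 64..95, of which
 * bits 64..79 form the sign/exponent word of the 80-bit FPU register; the
 * stores above write 0xffff there to mark the aliased ST register the way MMX
 * writes do, i.e. with an all-ones exponent. */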
492
493#define IEM_MC_CLEAR_XREG_U32_MASK(a_iXReg, a_bMask) \
494 do { if ((a_bMask) & (1 << 0)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0] = 0; \
495 if ((a_bMask) & (1 << 1)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[1] = 0; \
496 if ((a_bMask) & (1 << 2)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[2] = 0; \
497 if ((a_bMask) & (1 << 3)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[3] = 0; \
498 } while (0)
499#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
500 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
501 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
502 } while (0)
503#define IEM_MC_FETCH_XREG_XMM(a_XmmValue, a_iXReg) \
504 do { (a_XmmValue).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
505 (a_XmmValue).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
506 } while (0)
507#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg, a_iQWord) \
508 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQWord)]; } while (0)
509#define IEM_MC_FETCH_XREG_R64(a_r64Value, a_iXReg, a_iQWord) \
510 do { (a_r64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[(a_iQWord)]; } while (0)
511#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg, a_iDWord) \
512 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDWord)]; } while (0)
513#define IEM_MC_FETCH_XREG_R32(a_r32Value, a_iXReg, a_iDWord) \
514 do { (a_r32Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[(a_iDWord)]; } while (0)
515#define IEM_MC_FETCH_XREG_U16(a_u16Value, a_iXReg, a_iWord) \
516 do { (a_u16Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au16[(a_iWord)]; } while (0)
517#define IEM_MC_FETCH_XREG_U8( a_u8Value, a_iXReg, a_iByte) \
518 do { (a_u8Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au8[(a_iByte)]; } while (0)
519#define IEM_MC_FETCH_XREG_PAIR_U128(a_Dst, a_iXReg1, a_iXReg2) \
520 do { (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
521 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
522 (a_Dst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[0]; \
523 (a_Dst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[1]; \
524 } while (0)
525#define IEM_MC_FETCH_XREG_PAIR_XMM(a_Dst, a_iXReg1, a_iXReg2) \
526 do { (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
527 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
528 (a_Dst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[0]; \
529 (a_Dst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[1]; \
530 } while (0)
531#define IEM_MC_FETCH_XREG_PAIR_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_iXReg2) \
532 do { (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
533 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
534 (a_Dst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[0]; \
535 (a_Dst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[1]; \
536 (a_Dst).u64Rax = pVCpu->cpum.GstCtx.rax; \
537 (a_Dst).u64Rdx = pVCpu->cpum.GstCtx.rdx; \
538 } while (0)
539#define IEM_MC_FETCH_XREG_PAIR_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_iXReg2) \
540 do { (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
541 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
542 (a_Dst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[0]; \
543 (a_Dst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[1]; \
544 (a_Dst).u64Rax = (int64_t)(int32_t)pVCpu->cpum.GstCtx.eax; \
545 (a_Dst).u64Rdx = (int64_t)(int32_t)pVCpu->cpum.GstCtx.edx; \
546 } while (0)
547#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
548 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
549 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
550 } while (0)
551#define IEM_MC_STORE_XREG_XMM(a_iXReg, a_XmmValue) \
552 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_XmmValue).au64[0]; \
553 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_XmmValue).au64[1]; \
554 } while (0)
555#define IEM_MC_STORE_XREG_XMM_U32(a_iXReg, a_iDword, a_XmmValue) \
556 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDword)] = (a_XmmValue).au32[(a_iDword)]; } while (0)
557#define IEM_MC_STORE_XREG_XMM_U64(a_iXReg, a_iQword, a_XmmValue) \
558 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQword)] = (a_XmmValue).au64[(a_iQword)]; } while (0)
559#define IEM_MC_STORE_XREG_U64(a_iXReg, a_iQword, a_u64Value) \
560 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQword)] = (a_u64Value); } while (0)
561#define IEM_MC_STORE_XREG_U32(a_iXReg, a_iDword, a_u32Value) \
562 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDword)] = (a_u32Value); } while (0)
563#define IEM_MC_STORE_XREG_U16(a_iXReg, a_iWord, a_u16Value) \
564 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au16[(a_iWord)] = (a_u16Value); } while (0)
565#define IEM_MC_STORE_XREG_U8(a_iXReg, a_iByte, a_u8Value) \
566 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au8[(a_iByte)] = (a_u8Value); } while (0)
567
568#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
569 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
570 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
571 } while (0)
572
573#define IEM_MC_STORE_XREG_U32_U128(a_iXReg, a_iDwDst, a_u128Value, a_iDwSrc) \
574 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDwDst)] = (a_u128Value).au32[(a_iDwSrc)]; } while (0)
575#define IEM_MC_STORE_XREG_R32(a_iXReg, a_r32Value) \
576 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[0] = (a_r32Value); } while (0)
577#define IEM_MC_STORE_XREG_R64(a_iXReg, a_r64Value) \
578 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[0] = (a_r64Value); } while (0)
579#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
580 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
581 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
582 } while (0)
583
584#define IEM_MC_BROADCAST_XREG_U8_ZX_VLMAX(a_iXRegDst, a_u8Src) \
585 do { uintptr_t const iXRegDstTmp = (a_iXRegDst); \
586 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[0] = (a_u8Src); \
587 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[1] = (a_u8Src); \
588 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[2] = (a_u8Src); \
589 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[3] = (a_u8Src); \
590 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[4] = (a_u8Src); \
591 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[5] = (a_u8Src); \
592 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[6] = (a_u8Src); \
593 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[7] = (a_u8Src); \
594 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[8] = (a_u8Src); \
595 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[9] = (a_u8Src); \
596 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[10] = (a_u8Src); \
597 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[11] = (a_u8Src); \
598 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[12] = (a_u8Src); \
599 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[13] = (a_u8Src); \
600 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[14] = (a_u8Src); \
601 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[15] = (a_u8Src); \
602 IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
603 } while (0)
604#define IEM_MC_BROADCAST_XREG_U16_ZX_VLMAX(a_iXRegDst, a_u16Src) \
605 do { uintptr_t const iXRegDstTmp = (a_iXRegDst); \
606 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[0] = (a_u16Src); \
607 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[1] = (a_u16Src); \
608 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[2] = (a_u16Src); \
609 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[3] = (a_u16Src); \
610 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[4] = (a_u16Src); \
611 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[5] = (a_u16Src); \
612 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[6] = (a_u16Src); \
613 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[7] = (a_u16Src); \
614 IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
615 } while (0)
616#define IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX(a_iXRegDst, a_u32Src) \
617 do { uintptr_t const iXRegDstTmp = (a_iXRegDst); \
618 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[0] = (a_u32Src); \
619 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[1] = (a_u32Src); \
620 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[2] = (a_u32Src); \
621 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[3] = (a_u32Src); \
622 IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
623 } while (0)
624#define IEM_MC_BROADCAST_XREG_U64_ZX_VLMAX(a_iXRegDst, a_u64Src) \
625 do { uintptr_t const iXRegDstTmp = (a_iXRegDst); \
626 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au64[0] = (a_u64Src); \
627 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au64[1] = (a_u64Src); \
628 IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
629 } while (0)
630
631#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
632 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
633#define IEM_MC_REF_XREG_XMM(a_pXmmDst, a_iXReg) \
634 (a_pXmmDst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)])
635#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
636 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
637#define IEM_MC_REF_XREG_XMM_CONST(a_pXmmDst, a_iXReg) \
638 (a_pXmmDst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)])
639#define IEM_MC_REF_XREG_U32_CONST(a_pu32Dst, a_iXReg) \
640 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0])
641#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
642 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0])
643#define IEM_MC_REF_XREG_R32_CONST(a_pr32Dst, a_iXReg) \
644 (a_pr32Dst) = ((RTFLOAT32U const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[0])
645#define IEM_MC_REF_XREG_R64_CONST(a_pr64Dst, a_iXReg) \
646 (a_pr64Dst) = ((RTFLOAT64U const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[0])
647#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
648 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[0] \
649 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[0]; \
650 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[1] \
651 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[1]; \
652 } while (0)
653
654#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
655 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
656 (a_u32Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au32[0]; \
657 } while (0)
658#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc, a_iQWord) \
659 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
660 if ((a_iQWord) < 2) \
661 (a_u64Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[(a_iQWord)]; \
662 else \
663 (a_u64Dst) = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[(a_iQWord) - 2]; \
664 } while (0)
665#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc, a_iDQword) \
666 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
667 if ((a_iDQword) == 0) \
668 { \
669 (a_u128Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegSrcTmp)].au64[0]; \
670 (a_u128Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegSrcTmp)].au64[1]; \
671 } \
672 else \
673 { \
674 (a_u128Dst).au64[0] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegSrcTmp)].au64[0]; \
675 (a_u128Dst).au64[1] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegSrcTmp)].au64[1]; \
676 } \
677 } while (0)
678#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
679 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
680 (a_u256Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
681 (a_u256Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
682 (a_u256Dst).au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
683 (a_u256Dst).au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
684 } while (0)
685#define IEM_MC_FETCH_YREG_YMM(a_uYmmDst, a_iYRegSrc) \
686 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
687 (a_uYmmDst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
688 (a_uYmmDst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
689 (a_uYmmDst).au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
690 (a_uYmmDst).au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
691 } while (0)
692#define IEM_MC_FETCH_YREG_PAIR_YMM(a_uYmmDst, a_iYRegSrc1, a_iYRegSrc2) \
693 do { uintptr_t const iYRegSrc1Tmp = (a_iYRegSrc1); \
694 uintptr_t const iYRegSrc2Tmp = (a_iYRegSrc2); \
695 (a_uYmmDst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc1Tmp].au64[0]; \
696 (a_uYmmDst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc1Tmp].au64[1]; \
697 (a_uYmmDst).uSrc1.au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrc1Tmp].au64[0]; \
698 (a_uYmmDst).uSrc1.au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrc1Tmp].au64[1]; \
699 (a_uYmmDst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc2Tmp].au64[0]; \
700 (a_uYmmDst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc2Tmp].au64[1]; \
701 (a_uYmmDst).uSrc2.au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrc2Tmp].au64[0]; \
702 (a_uYmmDst).uSrc2.au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrc2Tmp].au64[1]; \
703 } while (0)
704
705#define IEM_MC_STORE_YREG_U128(a_iYRegDst, a_iDQword, a_u128Value) \
706 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
707 if ((a_iDQword) == 0) \
708 { \
709 pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au64[0] = (a_u128Value).au64[0]; \
710 pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au64[1] = (a_u128Value).au64[1]; \
711 } \
712 else \
713 { \
714 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au64[0] = (a_u128Value).au64[0]; \
715 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au64[1] = (a_u128Value).au64[1]; \
716 } \
717 } while (0)
718
719#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
720#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
721 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
722 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
723 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = 0; \
724 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
725 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
726 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
727 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
728 } while (0)
729#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
730 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
731 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
732 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
733 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
734 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
735 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
736 } while (0)
737#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
738 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
739 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
740 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
741 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
742 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
743 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
744 } while (0)
745#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
746 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
747 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
748 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
749 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
750 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
751 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
752 } while (0)
753#define IEM_MC_STORE_YREG_YMM_ZX_VLMAX(a_iYRegDst, a_uYmmSrc) \
754 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
755 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_uYmmSrc).au64[0]; \
756 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_uYmmSrc).au64[1]; \
757 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_uYmmSrc).au64[2]; \
758 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_uYmmSrc).au64[3]; \
759 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
760 } while (0)
761#define IEM_MC_STORE_YREG_U32_U256(a_iYRegDst, a_iDwDst, a_u256Value, a_iDwSrc) \
762 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
763 if ((a_iDwDst) < 4) \
764 pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au32[(a_iDwDst)] = (a_u256Value).au32[(a_iDwSrc)]; \
765 else \
766 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au32[(a_iDwDst) - 4] = (a_u256Value).au32[(a_iDwSrc)]; \
767 } while (0)
768#define IEM_MC_STORE_YREG_U64_U256(a_iYRegDst, a_iQwDst, a_u256Value, a_iQwSrc) \
769 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
770 if ((a_iQwDst) < 2) \
771 pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au64[(a_iQwDst)] = (a_u256Value).au64[(a_iQwSrc)]; \
772 else \
773 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au64[(a_iQwDst) - 2] = (a_u256Value).au64[(a_iQwSrc)]; \
774 } while (0)
775#define IEM_MC_STORE_YREG_U64(a_iYRegDst, a_iQword, a_u64Value) \
776 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
777 if ((a_iQword) < 2) \
778 pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au64[(a_iQword)] = (a_u64Value); \
779 else \
780 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au64[(a_iQword) - 2] = (a_u64Value); \
781 } while (0)
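/* Note: the index checks above reflect the split YMM storage: qword/dword
 * indices addressing the low 128 bits go to the legacy aXMM save area, while
 * the upper 128 bits live in the XSAVE YmmHi component, hence the "- 2" and
 * "- 4" adjustments. */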
782
783#define IEM_MC_BROADCAST_YREG_U8_ZX_VLMAX(a_iYRegDst, a_u8Src) \
784 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
785 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[0] = (a_u8Src); \
786 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[1] = (a_u8Src); \
787 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[2] = (a_u8Src); \
788 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[3] = (a_u8Src); \
789 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[4] = (a_u8Src); \
790 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[5] = (a_u8Src); \
791 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[6] = (a_u8Src); \
792 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[7] = (a_u8Src); \
793 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[8] = (a_u8Src); \
794 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[9] = (a_u8Src); \
795 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[10] = (a_u8Src); \
796 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[11] = (a_u8Src); \
797 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[12] = (a_u8Src); \
798 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[13] = (a_u8Src); \
799 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[14] = (a_u8Src); \
800 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[15] = (a_u8Src); \
801 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[0] = (a_u8Src); \
802 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[1] = (a_u8Src); \
803 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[2] = (a_u8Src); \
804 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[3] = (a_u8Src); \
805 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[4] = (a_u8Src); \
806 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[5] = (a_u8Src); \
807 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[6] = (a_u8Src); \
808 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[7] = (a_u8Src); \
809 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[8] = (a_u8Src); \
810 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[9] = (a_u8Src); \
811 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[10] = (a_u8Src); \
812 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[11] = (a_u8Src); \
813 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[12] = (a_u8Src); \
814 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[13] = (a_u8Src); \
815 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[14] = (a_u8Src); \
816 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[15] = (a_u8Src); \
817 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
818 } while (0)
819#define IEM_MC_BROADCAST_YREG_U16_ZX_VLMAX(a_iYRegDst, a_u16Src) \
820 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
821 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[0] = (a_u16Src); \
822 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[1] = (a_u16Src); \
823 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[2] = (a_u16Src); \
824 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[3] = (a_u16Src); \
825 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[4] = (a_u16Src); \
826 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[5] = (a_u16Src); \
827 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[6] = (a_u16Src); \
828 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[7] = (a_u16Src); \
829 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[0] = (a_u16Src); \
830 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[1] = (a_u16Src); \
831 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[2] = (a_u16Src); \
832 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[3] = (a_u16Src); \
833 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[4] = (a_u16Src); \
834 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[5] = (a_u16Src); \
835 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[6] = (a_u16Src); \
836 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[7] = (a_u16Src); \
837 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
838 } while (0)
839#define IEM_MC_BROADCAST_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
840 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
841 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
842 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = (a_u32Src); \
843 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[2] = (a_u32Src); \
844 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[3] = (a_u32Src); \
845 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[0] = (a_u32Src); \
846 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[1] = (a_u32Src); \
847 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[2] = (a_u32Src); \
848 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[3] = (a_u32Src); \
849 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
850 } while (0)
851#define IEM_MC_BROADCAST_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
852 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
853 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
854 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u64Src); \
855 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u64Src); \
856 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u64Src); \
857 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
858 } while (0)
859#define IEM_MC_BROADCAST_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
860 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
861 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
862 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
863 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
864 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
865 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
866 } while (0)
867
868#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
869 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
870#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
871 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
872#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
873 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].au64[0])
874#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
875 do { uintptr_t const iYRegTmp = (a_iYReg); \
876 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
877 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
878 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegTmp); \
879 } while (0)
880
881#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
882 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
883 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
884 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
885 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
886 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
887 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
888 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
889 } while (0)
890#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
891 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
892 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
893 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
894 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
895 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
896 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
897 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
898 } while (0)
899#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
900 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
901 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
902 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
903 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
904 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
905 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
906 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
907 } while (0)
908
909#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
910 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
911 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
912 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
913 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc32Tmp].au32[0]; \
914 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au32[1]; \
915 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
916 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
917 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
918 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
919 } while (0)
920#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
921 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
922 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
923 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
924 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
925 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
926 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
927 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
928 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
929 } while (0)
930#define IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovlhps */ \
931 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
932 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
933 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
934 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[0]; \
935 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
936 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
937 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
938 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
939 } while (0)
940#define IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
941 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
942 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
943 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
944 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[1]; \
945 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
946 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
947 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
948 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
949 } while (0)
950#define IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX(a_iYRegDst, a_iYRegSrcHx, a_u64Local) \
951 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
952 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
953 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[0]; \
954 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u64Local); \
955 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
956 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
957 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
958 } while (0)
959#define IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
960 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
961 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
962 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
963 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
964 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
965 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
966 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
967 } while (0)
968
969#define IEM_MC_CLEAR_ZREG_256_UP(a_iYReg) \
970 do { IEM_MC_INT_CLEAR_ZMM_256_UP(a_iYReg); } while (0)
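
/* Editorial usage sketch (not part of the original header): the *_ZX_VLMAX
 * helpers above implement the AVX rule that a VEX-encoded write to an XMM
 * register zeroes the register bits above the written width up to VLMAX.
 * A VEX.128 vmovlhps body could therefore reduce to a single merge MC:
 *
 *      IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX(IEM_GET_MODRM_REG(pVCpu, bRm),
 *                                             IEM_GET_MODRM_RM(pVCpu, bRm),
 *                                             IEM_GET_EFFECTIVE_VVVV(pVCpu));
 *
 * The decoder helper names (IEM_GET_MODRM_REG, IEM_GET_MODRM_RM,
 * IEM_GET_EFFECTIVE_VVVV) are assumptions here and live outside this file. */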
971
972#define IEM_MC_FETCH_MEM_SEG_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
973 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
974#define IEM_MC_FETCH_MEM16_SEG_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
975 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
976#define IEM_MC_FETCH_MEM32_SEG_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
977 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
978
979#define IEM_MC_FETCH_MEM_FLAT_U8(a_u8Dst, a_GCPtrMem) \
980 ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
981#define IEM_MC_FETCH_MEM16_FLAT_U8(a_u8Dst, a_GCPtrMem16) \
982 ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem16)))
983#define IEM_MC_FETCH_MEM32_FLAT_U8(a_u8Dst, a_GCPtrMem32) \
984 ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem32)))
985
986#define IEM_MC_FETCH_MEM_SEG_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
987 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
988#define IEM_MC_FETCH_MEM_SEG_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
989 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
990#define IEM_MC_FETCH_MEM_SEG_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
991 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
992#define IEM_MC_FETCH_MEM_SEG_I16_DISP(a_i16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
993 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
994
995#define IEM_MC_FETCH_MEM_FLAT_U16(a_u16Dst, a_GCPtrMem) \
996 ((a_u16Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
997#define IEM_MC_FETCH_MEM_FLAT_U16_DISP(a_u16Dst, a_GCPtrMem, a_offDisp) \
998 ((a_u16Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
999#define IEM_MC_FETCH_MEM_FLAT_I16(a_i16Dst, a_GCPtrMem) \
1000 ((a_i16Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1001#define IEM_MC_FETCH_MEM_FLAT_I16_DISP(a_i16Dst, a_GCPtrMem, a_offDisp) \
1002 ((a_i16Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
1003
1004#define IEM_MC_FETCH_MEM_SEG_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1005 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1006#define IEM_MC_FETCH_MEM_SEG_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
1007 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
1008#define IEM_MC_FETCH_MEM_SEG_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
1009 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1010#define IEM_MC_FETCH_MEM_SEG_I32_DISP(a_i32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
1011 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
1012
1013#define IEM_MC_FETCH_MEM_FLAT_U32(a_u32Dst, a_GCPtrMem) \
1014 ((a_u32Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
1015#define IEM_MC_FETCH_MEM_FLAT_U32_DISP(a_u32Dst, a_GCPtrMem, a_offDisp) \
1016 ((a_u32Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
1017#define IEM_MC_FETCH_MEM_FLAT_I32(a_i32Dst, a_GCPtrMem) \
1018 ((a_i32Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
1019#define IEM_MC_FETCH_MEM_FLAT_I32_DISP(a_i32Dst, a_GCPtrMem, a_offDisp) \
1020 ((a_i32Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
1021
1022#define IEM_MC_FETCH_MEM_SEG_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1023 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1024#define IEM_MC_FETCH_MEM_SEG_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
1025 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
1026#define IEM_MC_FETCH_MEM_SEG_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
1027 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1028#define IEM_MC_FETCH_MEM_SEG_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
1029 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1030
1031#define IEM_MC_FETCH_MEM_FLAT_U64(a_u64Dst, a_GCPtrMem) \
1032 ((a_u64Dst) = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
1033#define IEM_MC_FETCH_MEM_FLAT_U64_DISP(a_u64Dst, a_GCPtrMem, a_offDisp) \
1034 ((a_u64Dst) = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
1035#define IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128(a_u64Dst, a_GCPtrMem) \
1036 ((a_u64Dst) = iemMemFlatFetchDataU64AlignedU128Jmp(pVCpu, (a_GCPtrMem)))
1037#define IEM_MC_FETCH_MEM_FLAT_I64(a_i64Dst, a_GCPtrMem) \
1038 ((a_i64Dst) = (int64_t)iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
1039
1040#define IEM_MC_FETCH_MEM_SEG_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
1041 ((a_r32Dst).u = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1042#define IEM_MC_FETCH_MEM_SEG_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
1043 ((a_r64Dst).u = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1044#define IEM_MC_FETCH_MEM_SEG_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
1045 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
1046#define IEM_MC_FETCH_MEM_SEG_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \
1047 iemMemFetchDataD80Jmp(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem))
1048
1049#define IEM_MC_FETCH_MEM_FLAT_R32(a_r32Dst, a_GCPtrMem) \
1050 ((a_r32Dst).u = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
1051#define IEM_MC_FETCH_MEM_FLAT_R64(a_r64Dst, a_GCPtrMem) \
1052 ((a_r64Dst).u = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
1053#define IEM_MC_FETCH_MEM_FLAT_R80(a_r80Dst, a_GCPtrMem) \
1054 iemMemFlatFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_GCPtrMem))
1055#define IEM_MC_FETCH_MEM_FLAT_D80(a_d80Dst, a_GCPtrMem) \
1056 iemMemFlatFetchDataD80Jmp(pVCpu, &(a_d80Dst), (a_GCPtrMem))
1057
1058#define IEM_MC_FETCH_MEM_SEG_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
1059 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
1060#define IEM_MC_FETCH_MEM_SEG_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \
1061 iemMemFetchDataU128NoAcJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
1062#define IEM_MC_FETCH_MEM_SEG_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
1063 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
1064
1065#define IEM_MC_FETCH_MEM_SEG_XMM(a_XmmDst, a_iSeg, a_GCPtrMem) \
1066 iemMemFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
1067#define IEM_MC_FETCH_MEM_SEG_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \
1068 iemMemFetchDataU128NoAcJmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
1069#define IEM_MC_FETCH_MEM_SEG_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \
1070 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
1071
1072#define IEM_MC_FETCH_MEM_FLAT_U128(a_u128Dst, a_GCPtrMem) \
1073 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
1074#define IEM_MC_FETCH_MEM_FLAT_U128_NO_AC(a_u128Dst, a_GCPtrMem) \
1075 iemMemFlatFetchDataU128NoAcJmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
1076#define IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE(a_u128Dst, a_GCPtrMem) \
1077 iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
1078
1079#define IEM_MC_FETCH_MEM_FLAT_XMM(a_XmmDst, a_GCPtrMem) \
1080 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem))
1081#define IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC(a_XmmDst, a_GCPtrMem) \
1082 iemMemFlatFetchDataU128NoAcJmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem))
1083#define IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE(a_XmmDst, a_GCPtrMem) \
1084 iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem))
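
/* Editorial note (not from the original header): the _ALIGN_SSE fetch variants
 * are intended for accesses that must be 16-byte aligned (movaps/movdqa style),
 * whereas the _NO_AC variants appear to skip the alignment-check (#AC) handling
 * performed by the plain fetchers. */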
1085
1086#define IEM_MC_FETCH_MEM_SEG_U128_AND_XREG_U128(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1087 iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \
1088 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1089 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1090 } while (0)
1091#define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
1092 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \
1093 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1094 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1095 } while (0)
1096
1097#define IEM_MC_FETCH_MEM_SEG_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1098 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_iSeg2), (a_GCPtrMem2)); \
1099 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1100 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1101 } while (0)
1102
1103#define IEM_MC_FETCH_MEM_SEG_XMM_NO_AC_AND_XREG_XMM(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1104 iemMemFetchDataU128NoAcJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_iSeg2), (a_GCPtrMem2)); \
1105 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1106 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1107 } while (0)
1108
1109#define IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
1110 iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_GCPtrMem2)); \
1111 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1112 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1113 } while (0)
1114
1115#define IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC_AND_XREG_XMM(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
1116 iemMemFlatFetchDataU128NoAcJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_GCPtrMem2)); \
1117 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1118 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1119 } while (0)
1120
1121#define IEM_MC_FETCH_MEM_SEG_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_iSeg2, a_GCPtrMem2) do { \
1122 (a_Dst).uSrc2.uXmm.au64[0] = 0; \
1123 (a_Dst).uSrc2.uXmm.au64[1] = 0; \
1124 (a_Dst).uSrc2.uXmm.au32[(a_iDWord2)] = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg2), (a_GCPtrMem2)); \
1125 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1126 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1127 } while (0)
1128#define IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_GCPtrMem2) do { \
1129 (a_Dst).uSrc2.uXmm.au64[0] = 0; \
1130 (a_Dst).uSrc2.uXmm.au64[1] = 0; \
1131 (a_Dst).uSrc2.uXmm.au32[(a_iDWord2)] = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem2)); \
1132 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1133 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1134 } while (0)
1135
1136#define IEM_MC_FETCH_MEM_SEG_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_iSeg2, a_GCPtrMem2) do { \
1137 (a_Dst).uSrc2.uXmm.au64[!(a_iQWord2)] = 0; \
1138 (a_Dst).uSrc2.uXmm.au64[(a_iQWord2)] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg2), (a_GCPtrMem2)); \
1139 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1140 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1141 } while (0)
1142#define IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_GCPtrMem2) do { \
1143 (a_Dst).uSrc2.uXmm.au64[!(a_iQWord2)] = 0; \
1144 (a_Dst).uSrc2.uXmm.au64[(a_iQWord2)] = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem2)); \
1145 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1146 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1147 } while (0)
1148
1149
1150#define IEM_MC_FETCH_MEM_SEG_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1151 iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \
1152 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1153 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1154 (a_Dst).u64Rax = pVCpu->cpum.GstCtx.rax; \
1155 (a_Dst).u64Rdx = pVCpu->cpum.GstCtx.rdx; \
1156 } while (0)
1157#define IEM_MC_FETCH_MEM_SEG_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
1158 iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \
1159 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1160 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1161 (a_Dst).u64Rax = (int64_t)(int32_t)pVCpu->cpum.GstCtx.eax; \
1162 (a_Dst).u64Rdx = (int64_t)(int32_t)pVCpu->cpum.GstCtx.edx; \
1163 } while (0)
1164
1165#define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
1166 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \
1167 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1168 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1169 (a_Dst).u64Rax = pVCpu->cpum.GstCtx.rax; \
1170 (a_Dst).u64Rdx = pVCpu->cpum.GstCtx.rdx; \
1171 } while (0)
1172#define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
1173 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \
1174 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
1175 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
1176 (a_Dst).u64Rax = (int64_t)(int32_t)pVCpu->cpum.GstCtx.eax; \
1177 (a_Dst).u64Rdx = (int64_t)(int32_t)pVCpu->cpum.GstCtx.edx; \
1178 } while (0)
1179
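/* Editorial sketch (assumption, not the original definition): the a_Dst argument
 * of the *_U128_AND_XREG_U128* fetch macros above is expected to provide at
 * least the members touched by the macro bodies, e.g.:
 *
 *      typedef struct IEMU128ANDXREG_SKETCH
 *      {
 *          RTUINT128U uSrc1;   // copy of the XMM register operand
 *          RTUINT128U uSrc2;   // the memory operand
 *          uint64_t   u64Rax;  // only for the *_AND_RAX_RDX_U64 variants
 *          uint64_t   u64Rdx;  // (sign-extended EAX/EDX for the U32_SX_U64 ones)
 *      } IEMU128ANDXREG_SKETCH;
 *
 * The XMM/YMM flavoured variants wrap the sources in .uXmm/.ymm members instead.
 * The real argument types live elsewhere in IEM; the name above is illustrative. */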
1180
1181#define IEM_MC_FETCH_MEM_SEG_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
1182 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
1183#define IEM_MC_FETCH_MEM_SEG_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \
1184 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
1185#define IEM_MC_FETCH_MEM_SEG_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
1186 iemMemFetchDataU256AlignedAvxJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
1187
1188#define IEM_MC_FETCH_MEM_SEG_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \
1189 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
1190#define IEM_MC_FETCH_MEM_SEG_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \
1191 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
1192#define IEM_MC_FETCH_MEM_SEG_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \
1193 iemMemFetchDataU256AlignedAvxJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
1194
1195#define IEM_MC_FETCH_MEM_SEG_YMM_NO_AC_AND_YREG_YMM(a_uYmmDst, a_iYRegSrc1, a_iSeg2, a_GCPtrMem2) do { \
1196 uintptr_t const a_iYRegSrc1Tmp = (a_iYRegSrc1); \
1197 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_uYmmDst).uSrc2.ymm, (a_iSeg2), (a_GCPtrMem2)); \
1198 (a_uYmmDst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[a_iYRegSrc1Tmp].au64[0]; \
1199 (a_uYmmDst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[a_iYRegSrc1Tmp].au64[1]; \
1200 (a_uYmmDst).uSrc1.au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[a_iYRegSrc1Tmp].au64[0]; \
1201 (a_uYmmDst).uSrc1.au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[a_iYRegSrc1Tmp].au64[1]; \
1202 } while (0)
1203
1204#define IEM_MC_FETCH_MEM_FLAT_U256(a_u256Dst, a_GCPtrMem) \
1205 iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem))
1206#define IEM_MC_FETCH_MEM_FLAT_U256_NO_AC(a_u256Dst, a_GCPtrMem) \
1207 iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem))
1208#define IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX(a_u256Dst, a_GCPtrMem) \
1209 iemMemFlatFetchDataU256AlignedAvxJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem))
1210
1211#define IEM_MC_FETCH_MEM_FLAT_YMM(a_YmmDst, a_GCPtrMem) \
1212 iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_GCPtrMem))
1213#define IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC(a_YmmDst, a_GCPtrMem) \
1214 iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_GCPtrMem))
1215#define IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX(a_YmmDst, a_GCPtrMem) \
1216 iemMemFlatFetchDataU256AlignedAvxJmp(pVCpu, &(a_YmmDst).ymm, (a_GCPtrMem))
1217
1218#define IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX_AND_YREG_YMM(a_uYmmDst, a_iYRegSrc1, a_GCPtrMem2) do { \
1219 uintptr_t const a_iYRegSrc1Tmp = (a_iYRegSrc1); \
1220 iemMemFlatFetchDataU256AlignedAvxJmp(pVCpu, &(a_uYmmDst).uSrc2.ymm, (a_GCPtrMem2)); \
1221 (a_uYmmDst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[a_iYRegSrc1Tmp].au64[0]; \
1222 (a_uYmmDst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[a_iYRegSrc1Tmp].au64[1]; \
1223 (a_uYmmDst).uSrc1.au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[a_iYRegSrc1Tmp].au64[0]; \
1224 (a_uYmmDst).uSrc1.au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[a_iYRegSrc1Tmp].au64[1]; \
1225 } while (0)
1226
1227
1228
1229#define IEM_MC_FETCH_MEM_SEG_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
1230 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1231#define IEM_MC_FETCH_MEM_SEG_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1232 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1233#define IEM_MC_FETCH_MEM_SEG_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1234 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1235#define IEM_MC_FETCH_MEM_SEG_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1236 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1237#define IEM_MC_FETCH_MEM_SEG_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1238 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1239#define IEM_MC_FETCH_MEM_SEG_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1240 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1241
1242#define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16(a_u16Dst, a_GCPtrMem) \
1243 ((a_u16Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1244#define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32(a_u32Dst, a_GCPtrMem) \
1245 ((a_u32Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1246#define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64(a_u64Dst, a_GCPtrMem) \
1247 ((a_u64Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1248#define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32(a_u32Dst, a_GCPtrMem) \
1249 ((a_u32Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1250#define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64(a_u64Dst, a_GCPtrMem) \
1251 ((a_u64Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1252#define IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64(a_u64Dst, a_GCPtrMem) \
1253 ((a_u64Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
1254
1255#define IEM_MC_FETCH_MEM_SEG_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
1256 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1257#define IEM_MC_FETCH_MEM_SEG_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1258 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1259#define IEM_MC_FETCH_MEM_SEG_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1260 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1261#define IEM_MC_FETCH_MEM_SEG_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
1262 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1263#define IEM_MC_FETCH_MEM_SEG_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1264 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1265#define IEM_MC_FETCH_MEM_SEG_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
1266 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
1267
1268#define IEM_MC_FETCH_MEM_FLAT_U8_SX_U16(a_u16Dst, a_GCPtrMem) \
1269 ((a_u16Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1270#define IEM_MC_FETCH_MEM_FLAT_U8_SX_U32(a_u32Dst, a_GCPtrMem) \
1271 ((a_u32Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1272#define IEM_MC_FETCH_MEM_FLAT_U8_SX_U64(a_u64Dst, a_GCPtrMem) \
1273 ((a_u64Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
1274#define IEM_MC_FETCH_MEM_FLAT_U16_SX_U32(a_u32Dst, a_GCPtrMem) \
1275 ((a_u32Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1276#define IEM_MC_FETCH_MEM_FLAT_U16_SX_U64(a_u64Dst, a_GCPtrMem) \
1277 ((a_u64Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
1278#define IEM_MC_FETCH_MEM_FLAT_U32_SX_U64(a_u64Dst, a_GCPtrMem) \
1279 ((a_u64Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
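
/* Editorial usage sketch (not part of the original header): the _ZX_ fetchers
 * above serve MOVZX-style loads and the _SX_ ones MOVSX-style loads, e.g.:
 *
 *      IEM_MC_FETCH_MEM_SEG_U8_ZX_U32(u32Dst, X86_SREG_DS, GCPtrEffSrc);  // movzx r32, m8
 *      IEM_MC_FETCH_MEM_SEG_U8_SX_U32(u32Dst, X86_SREG_DS, GCPtrEffSrc);  // movsx r32, m8
 *
 * u32Dst and GCPtrEffSrc stand in for locals declared via the usual MCs. */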
1280
1281#define IEM_MC_STORE_MEM_SEG_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
1282 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
1283#define IEM_MC_STORE_MEM_SEG_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
1284 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
1285#define IEM_MC_STORE_MEM_SEG_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
1286 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
1287#define IEM_MC_STORE_MEM_SEG_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
1288 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
1289
1290#define IEM_MC_STORE_MEM_FLAT_U8(a_GCPtrMem, a_u8Value) \
1291 iemMemFlatStoreDataU8Jmp(pVCpu, (a_GCPtrMem), (a_u8Value))
1292#define IEM_MC_STORE_MEM_FLAT_U16(a_GCPtrMem, a_u16Value) \
1293 iemMemFlatStoreDataU16Jmp(pVCpu, (a_GCPtrMem), (a_u16Value))
1294#define IEM_MC_STORE_MEM_FLAT_U32(a_GCPtrMem, a_u32Value) \
1295 iemMemFlatStoreDataU32Jmp(pVCpu, (a_GCPtrMem), (a_u32Value))
1296#define IEM_MC_STORE_MEM_FLAT_U64(a_GCPtrMem, a_u64Value) \
1297 iemMemFlatStoreDataU64Jmp(pVCpu, (a_GCPtrMem), (a_u64Value))
1298
1299#define IEM_MC_STORE_MEM_SEG_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
1300 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
1301#define IEM_MC_STORE_MEM_SEG_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
1302 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
1303#define IEM_MC_STORE_MEM_SEG_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
1304 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
1305#define IEM_MC_STORE_MEM_SEG_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
1306 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
1307
1308#define IEM_MC_STORE_MEM_FLAT_U8_CONST(a_GCPtrMem, a_u8C) \
1309 iemMemFlatStoreDataU8Jmp(pVCpu, (a_GCPtrMem), (a_u8C))
1310#define IEM_MC_STORE_MEM_FLAT_U16_CONST(a_GCPtrMem, a_u16C) \
1311 iemMemFlatStoreDataU16Jmp(pVCpu, (a_GCPtrMem), (a_u16C))
1312#define IEM_MC_STORE_MEM_FLAT_U32_CONST(a_GCPtrMem, a_u32C) \
1313 iemMemFlatStoreDataU32Jmp(pVCpu, (a_GCPtrMem), (a_u32C))
1314#define IEM_MC_STORE_MEM_FLAT_U64_CONST(a_GCPtrMem, a_u64C) \
1315 iemMemFlatStoreDataU64Jmp(pVCpu, (a_GCPtrMem), (a_u64C))
1316
1317#define IEM_MC_STORE_MEM_BY_REF_I8_CONST( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
1318#define IEM_MC_STORE_MEM_BY_REF_I16_CONST(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
1319#define IEM_MC_STORE_MEM_BY_REF_I32_CONST(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
1320#define IEM_MC_STORE_MEM_BY_REF_I64_CONST(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
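
/* Editorial note (not from the original header): the NEG_QNAN/INDEF helpers
 * below store the standard "negative QNaN" / indefinite encodings: 0xffc00000
 * for binary32, 0xfff8000000000000 for binary64, and for the 80-bit formats a
 * set sign with an all-ones upper word (0xffff) and a 0xc000000000000000
 * mantissa. */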
1321#define IEM_MC_STORE_MEM_BY_REF_R32_NEG_QNAN(a_pr32Dst) (a_pr32Dst)->u = UINT32_C(0xffc00000)
1322#define IEM_MC_STORE_MEM_BY_REF_R64_NEG_QNAN(a_pr64Dst) (a_pr64Dst)->u = UINT64_C(0xfff8000000000000)
1323#define IEM_MC_STORE_MEM_BY_REF_R80_NEG_QNAN(a_pr80Dst) \
1324 do { \
1325 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
1326 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
1327 } while (0)
1328#define IEM_MC_STORE_MEM_BY_REF_D80_INDEF(a_pd80Dst) \
1329 do { \
1330 (a_pd80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
1331 (a_pd80Dst)->au16[4] = UINT16_C(0xffff); \
1332 } while (0)
1333
1334#define IEM_MC_STORE_MEM_SEG_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
1335 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value))
1336#define IEM_MC_STORE_MEM_SEG_U128_NO_AC(a_iSeg, a_GCPtrMem, a_u128Value) \
1337 iemMemStoreDataU128NoAcJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value))
1338#define IEM_MC_STORE_MEM_SEG_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
1339 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
1340
1341#define IEM_MC_STORE_MEM_FLAT_U128(a_GCPtrMem, a_u128Value) \
1342 iemMemFlatStoreDataU128Jmp(pVCpu, (a_GCPtrMem), &(a_u128Value))
1343#define IEM_MC_STORE_MEM_FLAT_U128_NO_AC(a_GCPtrMem, a_u128Value) \
1344 iemMemFlatStoreDataU128NoAcJmp(pVCpu, (a_GCPtrMem), &(a_u128Value))
1345#define IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE(a_GCPtrMem, a_u128Value) \
1346 iemMemStoreDataU128AlignedSseJmp(pVCpu, UINT8_MAX, (a_GCPtrMem), (a_u128Value))
1347
1348#define IEM_MC_STORE_MEM_SEG_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
1349 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
1350#define IEM_MC_STORE_MEM_SEG_U256_NO_AC(a_iSeg, a_GCPtrMem, a_u256Value) \
1351 iemMemStoreDataU256NoAcJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
1352#define IEM_MC_STORE_MEM_SEG_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
1353 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
1354
1355#define IEM_MC_STORE_MEM_FLAT_U256(a_GCPtrMem, a_u256Value) \
1356 iemMemFlatStoreDataU256Jmp(pVCpu, (a_GCPtrMem), &(a_u256Value))
1357#define IEM_MC_STORE_MEM_FLAT_U256_NO_AC(a_GCPtrMem, a_u256Value) \
1358 iemMemFlatStoreDataU256NoAcJmp(pVCpu, (a_GCPtrMem), &(a_u256Value))
1359#define IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX(a_GCPtrMem, a_u256Value) \
1360 iemMemFlatStoreDataU256AlignedAvxJmp(pVCpu, (a_GCPtrMem), &(a_u256Value))
1361
1362/* Regular stack push and pop: */
1363#define IEM_MC_PUSH_U16(a_u16Value) iemMemStackPushU16Jmp(pVCpu, (a_u16Value))
1364#define IEM_MC_PUSH_U32(a_u32Value) iemMemStackPushU32Jmp(pVCpu, (a_u32Value))
1365#define IEM_MC_PUSH_U32_SREG(a_uSegVal) iemMemStackPushU32SRegJmp(pVCpu, (a_uSegVal))
1366#define IEM_MC_PUSH_U64(a_u64Value) iemMemStackPushU64Jmp(pVCpu, (a_u64Value))
1367
1368#define IEM_MC_POP_GREG_U16(a_iGReg) iemMemStackPopGRegU16Jmp(pVCpu, (a_iGReg))
1369#define IEM_MC_POP_GREG_U32(a_iGReg) iemMemStackPopGRegU32Jmp(pVCpu, (a_iGReg))
1370#define IEM_MC_POP_GREG_U64(a_iGReg) iemMemStackPopGRegU64Jmp(pVCpu, (a_iGReg))
1371
1372/* 32-bit flat stack push and pop: */
1373#define IEM_MC_FLAT32_PUSH_U16(a_u16Value) iemMemFlat32StackPushU16Jmp(pVCpu, (a_u16Value))
1374#define IEM_MC_FLAT32_PUSH_U32(a_u32Value) iemMemFlat32StackPushU32Jmp(pVCpu, (a_u32Value))
1375#define IEM_MC_FLAT32_PUSH_U32_SREG(a_uSegVal) iemMemFlat32StackPushU32SRegJmp(pVCpu, (a_uSegVal))
1376
1377#define IEM_MC_FLAT32_POP_GREG_U16(a_iGReg) iemMemFlat32StackPopGRegU16Jmp(pVCpu, (a_iGReg))
1378#define IEM_MC_FLAT32_POP_GREG_U32(a_iGReg) iemMemFlat32StackPopGRegU32Jmp(pVCpu, (a_iGReg))
1379
1380/* 64-bit flat stack push and pop: */
1381#define IEM_MC_FLAT64_PUSH_U16(a_u16Value) iemMemFlat64StackPushU16Jmp(pVCpu, (a_u16Value))
1382#define IEM_MC_FLAT64_PUSH_U64(a_u64Value) iemMemFlat64StackPushU64Jmp(pVCpu, (a_u64Value))
1383
1384#define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg) iemMemFlat64StackPopGRegU16Jmp(pVCpu, (a_iGReg))
1385#define IEM_MC_FLAT64_POP_GREG_U64(a_iGReg) iemMemFlat64StackPopGRegU64Jmp(pVCpu, (a_iGReg))
1386
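/* Editorial usage sketch (not part of the original header): a 16-bit PUSH reg
 * body built from these MCs might look like this (IEM_MC_LOCAL,
 * IEM_MC_FETCH_GREG_U16 and IEM_GET_MODRM_RM are assumed to be defined
 * elsewhere in the IEM sources):
 *
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, IEM_GET_MODRM_RM(pVCpu, bRm));
 *      IEM_MC_PUSH_U16(u16Value);
 *      IEM_MC_ADVANCE_PC_AND_FINISH();
 *      IEM_MC_END();
 */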
1387
1388/* 8-bit */
1389
1390/**
1391 * Maps guest memory for byte atomic read+write direct (or bounce) buffer
1392 * access, for atomic operations.
1393 *
1394 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1395 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1396 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1397 * @param[in] a_GCPtrMem The memory address.
1398 * @remarks Will return/long jump on errors.
1399 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1400 */
1401#define IEM_MC_MEM_SEG_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1402 (a_pu8Mem) = iemMemMapDataU8AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1403
1404/**
1405 * Maps guest memory for byte read+write direct (or bounce) buffer access.
1406 *
1407 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1408 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1409 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1410 * @param[in] a_GCPtrMem The memory address.
1411 * @remarks Will return/long jump on errors.
1412 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1413 */
1414#define IEM_MC_MEM_SEG_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1415 (a_pu8Mem) = iemMemMapDataU8RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1416
1417/**
1418 * Maps guest memory for byte writeonly direct (or bounce) buffer access.
1419 *
1420 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1421 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1422 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1423 * @param[in] a_GCPtrMem The memory address.
1424 * @remarks Will return/long jump on errors.
1425 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1426 */
1427#define IEM_MC_MEM_SEG_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1428 (a_pu8Mem) = iemMemMapDataU8WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1429
1430/**
1431 * Maps guest memory for byte readonly direct (or bounce) buffer access.
1432 *
1433 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1434 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1435 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1436 * @param[in] a_GCPtrMem The memory address.
1437 * @remarks Will return/long jump on errors.
1438 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1439 */
1440#define IEM_MC_MEM_SEG_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1441 (a_pu8Mem) = iemMemMapDataU8RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1442
1443/**
1444 * Maps guest memory for byte atomic read+write direct (or bounce) buffer
1445 * access, flat address variant.
1446 *
1447 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1448 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1449 * @param[in] a_GCPtrMem The memory address.
1450 * @remarks Will return/long jump on errors.
1451 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1452 */
1453#define IEM_MC_MEM_FLAT_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1454 (a_pu8Mem) = iemMemFlatMapDataU8AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1455
1456/**
1457 * Maps guest memory for byte read+write direct (or bounce) buffer access, flat
1458 * address variant.
1459 *
1460 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1461 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1462 * @param[in] a_GCPtrMem The memory address.
1463 * @remarks Will return/long jump on errors.
1464 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1465 */
1466#define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1467 (a_pu8Mem) = iemMemFlatMapDataU8RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1468
1469/**
1470 * Maps guest memory for byte writeonly direct (or bounce) buffer access, flat
1471 * address variant.
1472 *
1473 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1474 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1475 * @param[in] a_GCPtrMem The memory address.
1476 * @remarks Will return/long jump on errors.
1477 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1478 */
1479#define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1480 (a_pu8Mem) = iemMemFlatMapDataU8WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1481
1482/**
1483 * Maps guest memory for byte readonly direct (or bounce) buffer access, flat
1484 * address variant.
1485 *
1486 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
1487 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1488 * @param[in] a_GCPtrMem The memory address.
1489 * @remarks Will return/long jump on errors.
1490 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1491 */
1492#define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
1493 (a_pu8Mem) = iemMemFlatMapDataU8RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1494
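/* Editorial usage sketch (not part of the original header): a read-modify-write
 * byte access pairs one of the map MCs above with the matching commit+unmap MC
 * (see IEM_MC_MEM_COMMIT_AND_UNMAP_RW further down), roughly:
 *
 *      IEM_MC_MEM_SEG_MAP_U8_RW(pu8Dst, bUnmapInfo, X86_SREG_DS, GCPtrEffDst);
 *      ... operate on *pu8Dst ...
 *      IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo);
 *
 * pu8Dst, bUnmapInfo and GCPtrEffDst stand in for locals/arguments declared via
 * the usual MCs; this only illustrates the pairing. */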
1495
1496/* 16-bit */
1497
1498/**
1499 * Maps guest memory for word atomic read+write direct (or bounce) buffer access.
1500 *
1501 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1502 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1503 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1504 * @param[in] a_GCPtrMem The memory address.
1505 * @remarks Will return/long jump on errors.
1506 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1507 */
1508#define IEM_MC_MEM_SEG_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1509 (a_pu16Mem) = iemMemMapDataU16AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1510
1511/**
1512 * Maps guest memory for word read+write direct (or bounce) buffer access.
1513 *
1514 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1515 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1516 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1517 * @param[in] a_GCPtrMem The memory address.
1518 * @remarks Will return/long jump on errors.
1519 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1520 */
1521#define IEM_MC_MEM_SEG_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1522 (a_pu16Mem) = iemMemMapDataU16RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1523
1524/**
1525 * Maps guest memory for word writeonly direct (or bounce) buffer access.
1526 *
1527 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1528 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1529 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1530 * @param[in] a_GCPtrMem The memory address.
1531 * @remarks Will return/long jump on errors.
1532 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1533 */
1534#define IEM_MC_MEM_SEG_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1535 (a_pu16Mem) = iemMemMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1536
1537/**
1538 * Maps guest memory for word readonly direct (or bounce) buffer access.
1539 *
1540 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1541 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1542 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1543 * @param[in] a_GCPtrMem The memory address.
1544 * @remarks Will return/long jump on errors.
1545 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1546 */
1547#define IEM_MC_MEM_SEG_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1548 (a_pu16Mem) = iemMemMapDataU16RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1549
1550/**
1551 * Maps guest memory for word atomic read+write direct (or bounce) buffer
1552 * access, flat address variant.
1553 *
1554 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1555 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1556 * @param[in] a_GCPtrMem The memory address.
1557 * @remarks Will return/long jump on errors.
1558 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1559 */
1560#define IEM_MC_MEM_FLAT_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1561 (a_pu16Mem) = iemMemFlatMapDataU16AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1562
1563/**
1564 * Maps guest memory for word read+write direct (or bounce) buffer access, flat
1565 * address variant.
1566 *
1567 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1568 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1569 * @param[in] a_GCPtrMem The memory address.
1570 * @remarks Will return/long jump on errors.
1571 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1572 */
1573#define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1574 (a_pu16Mem) = iemMemFlatMapDataU16RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1575
1576/**
1577 * Maps guest memory for word writeonly direct (or bounce) buffer access, flat
1578 * address variant.
1579 *
1580 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1581 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1582 * @param[in] a_GCPtrMem The memory address.
1583 * @remarks Will return/long jump on errors.
1584 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1585 */
1586#define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1587 (a_pu16Mem) = iemMemFlatMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1588
1589/**
1590 * Maps guest memory for word readonly direct (or bounce) buffer access, flat
1591 * address variant.
1592 *
1593 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
1594 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1595 * @param[in] a_GCPtrMem The memory address.
1596 * @remarks Will return/long jump on errors.
1597 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1598 */
1599#define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
1600 (a_pu16Mem) = iemMemFlatMapDataU16RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1601
1602/** int16_t alias. */
1603#define IEM_MC_MEM_SEG_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1604 (a_pi16Mem) = (int16_t *)iemMemMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1605
1606/** Flat int16_t alias. */
1607#define IEM_MC_MEM_FLAT_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem) \
1608 (a_pi16Mem) = (int16_t *)iemMemFlatMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1609
1610
1611/* 32-bit */
1612
1613/**
1614 * Maps guest memory for dword atomic read+write direct (or bounce) buffer access.
1615 *
1616 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1617 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1618 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1619 * @param[in] a_GCPtrMem The memory address.
1620 * @remarks Will return/long jump on errors.
1621 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1622 */
1623#define IEM_MC_MEM_SEG_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1624 (a_pu32Mem) = iemMemMapDataU32AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1625
1626/**
1627 * Maps guest memory for dword read+write direct (or bounce) buffer access.
1628 *
1629 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1630 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1631 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1632 * @param[in] a_GCPtrMem The memory address.
1633 * @remarks Will return/long jump on errors.
1634 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1635 */
1636#define IEM_MC_MEM_SEG_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1637 (a_pu32Mem) = iemMemMapDataU32RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1638
1639/**
1640 * Maps guest memory for dword writeonly direct (or bounce) buffer access.
1641 *
1642 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1643 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1644 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1645 * @param[in] a_GCPtrMem The memory address.
1646 * @remarks Will return/long jump on errors.
1647 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1648 */
1649#define IEM_MC_MEM_SEG_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1650 (a_pu32Mem) = iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1651
1652/**
1653 * Maps guest memory for dword readonly direct (or bounce) buffer access.
1654 *
1655 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1656 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1657 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1658 * @param[in] a_GCPtrMem The memory address.
1659 * @remarks Will return/long jump on errors.
1660 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1661 */
1662#define IEM_MC_MEM_SEG_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1663 (a_pu32Mem) = iemMemMapDataU32RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1664
1665/**
1666 * Maps guest memory for dword atomic read+write direct (or bounce) buffer
1667 * access, flat address variant.
1668 *
1669 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1670 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1671 * @param[in] a_GCPtrMem The memory address.
1672 * @remarks Will return/long jump on errors.
1673 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1674 */
1675#define IEM_MC_MEM_FLAT_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
1676 (a_pu32Mem) = iemMemFlatMapDataU32AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1677
1678/**
1679 * Maps guest memory for dword read+write direct (or bounce) buffer access,
1680 * flat address variant.
1681 *
1682 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1683 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1684 * @param[in] a_GCPtrMem The memory address.
1685 * @remarks Will return/long jump on errors.
1686 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1687 */
1688#define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
1689 (a_pu32Mem) = iemMemFlatMapDataU32RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1690
1691/**
1692 * Maps guest memory for dword writeonly direct (or bounce) buffer access, flat
1693 * address variant.
1694 *
1695 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1696 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1697 * @param[in] a_GCPtrMem The memory address.
1698 * @remarks Will return/long jump on errors.
1699 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1700 */
1701#define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
1702 (a_pu32Mem) = iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1703
1704/**
1705 * Maps guest memory for dword readonly direct (or bounce) buffer access, flat
1706 * address variant.
1707 *
1708 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
1709 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1710 * @param[in] a_GCPtrMem The memory address.
1711 * @remarks Will return/long jump on errors.
1712 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1713 */
1714#define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
1715 (a_pu32Mem) = iemMemFlatMapDataU32RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1716
1717/** int32_t alias. */
1718#define IEM_MC_MEM_SEG_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1719 (a_pi32Mem) = (int32_t *)iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1720
1721/** Flat int32_t alias. */
1722#define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \
1723 (a_pi32Mem) = (int32_t *)iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1724
1725/** RTFLOAT32U alias. */
1726#define IEM_MC_MEM_SEG_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1727 (a_pr32Mem) = (PRTFLOAT32U)iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1728
1729/** Flat RTFLOAT32U alias. */
1730#define IEM_MC_MEM_FLAT_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) \
1731 (a_pr32Mem) = (PRTFLOAT32U)iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1732
1733
1734/* 64-bit */
1735
1736/**
1737 * Maps guest memory for qword atomic read+write direct (or bounce) buffer access.
1738 *
1739 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
1740 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1741 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1742 * @param[in] a_GCPtrMem The memory address.
1743 * @remarks Will return/long jump on errors.
1744 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1745 */
1746#define IEM_MC_MEM_SEG_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1747 (a_pu64Mem) = iemMemMapDataU64AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1748
1749/**
1750 * Maps guest memory for qword read+write direct (or bounce) buffer access.
1751 *
1752 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
1753 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1754 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1755 * @param[in] a_GCPtrMem The memory address.
1756 * @remarks Will return/long jump on errors.
1757 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1758 */
1759#define IEM_MC_MEM_SEG_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1760 (a_pu64Mem) = iemMemMapDataU64RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1761
1762/**
1763 * Maps guest memory for qword writeonly direct (or bounce) buffer access.
1764 *
1765 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
1766 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1767 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1768 * @param[in] a_GCPtrMem The memory address.
1769 * @remarks Will return/long jump on errors.
1770 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1771 */
1772#define IEM_MC_MEM_SEG_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1773 (a_pu64Mem) = iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1774
1775/**
1776 * Maps guest memory for qword readonly direct (or bounce) buffer access.
1777 *
1778 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
1779 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1780 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1781 * @param[in] a_GCPtrMem The memory address.
1782 * @remarks Will return/long jump on errors.
1783 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1784 */
1785#define IEM_MC_MEM_SEG_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1786 (a_pu64Mem) = iemMemMapDataU64RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1787
1788/**
1789 * Maps guest memory for qword atomic read+write direct (or bounce) buffer
1790 * access, flat address variant.
1791 *
1792 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
1793 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1794 * @param[in] a_GCPtrMem The memory address.
1795 * @remarks Will return/long jump on errors.
1796 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1797 */
1798#define IEM_MC_MEM_FLAT_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
1799 (a_pu64Mem) = iemMemFlatMapDataU64AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1800
1801/**
1802 * Maps guest memory for qword read+write direct (or bounce) buffer access,
1803 * flat address variant.
1804 *
1805 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
1806 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1807 * @param[in] a_GCPtrMem The memory address.
1808 * @remarks Will return/long jump on errors.
1809 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1810 */
1811#define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
1812 (a_pu64Mem) = iemMemFlatMapDataU64RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1813
1814/**
1815 * Maps guest memory for qword writeonly direct (or bounce) buffer access, flat
1816 * address variant.
1817 *
1818 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
1819 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1820 * @param[in] a_GCPtrMem The memory address.
1821 * @remarks Will return/long jump on errors.
1822 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1823 */
1824#define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
1825 (a_pu64Mem) = iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1826
1827/**
1828 * Maps guest memory for qword readonly direct (or bounce) buffer access, flat
1829 * address variant.
1830 *
1831 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
1832 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1833 * @param[in] a_GCPtrMem The memory address.
1834 * @remarks Will return/long jump on errors.
1835 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1836 */
1837#define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
1838 (a_pu64Mem) = iemMemFlatMapDataU64RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1839
1840/** int64_t alias. */
1841#define IEM_MC_MEM_SEG_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1842 (a_pi64Mem) = (int64_t *)iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1843
1844/** Flat int64_t alias. */
1845#define IEM_MC_MEM_FLAT_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem) \
1846 (a_pi64Mem) = (int64_t *)iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1847
1848/** RTFLOAT64U alias. */
1849#define IEM_MC_MEM_SEG_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1850 (a_pr64Mem) = (PRTFLOAT64U)iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1851
1852/** Flat RTFLOAT64U alias. */
1853#define IEM_MC_MEM_FLAT_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_GCPtrMem) \
1854 (a_pr64Mem) = (PRTFLOAT64U)iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1855
1856
1857/* 128-bit */
1858
1859/**
1860 * Maps guest memory for dqword atomic read+write direct (or bounce) buffer access.
1861 *
1862 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
1863 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1864 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1865 * @param[in] a_GCPtrMem The memory address.
1866 * @remarks Will return/long jump on errors.
1867 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1868 */
1869#define IEM_MC_MEM_SEG_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1870 (a_pu128Mem) = iemMemMapDataU128AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1871
1872/**
1873 * Maps guest memory for dqword read+write direct (or bounce) buffer access.
1874 *
1875 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
1876 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1877 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1878 * @param[in] a_GCPtrMem The memory address.
1879 * @remarks Will return/long jump on errors.
1880 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1881 */
1882#define IEM_MC_MEM_SEG_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1883 (a_pu128Mem) = iemMemMapDataU128RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1884
1885/**
1886 * Maps guest memory for dqword writeonly direct (or bounce) buffer access.
1887 *
1888 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
1889 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1890 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1891 * @param[in] a_GCPtrMem The memory address.
1892 * @remarks Will return/long jump on errors.
1893 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1894 */
1895#define IEM_MC_MEM_SEG_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1896 (a_pu128Mem) = iemMemMapDataU128WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1897
1898/**
1899 * Maps guest memory for dqword readonly direct (or bounce) buffer access.
1900 *
1901 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
1902 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1903 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1904 * @param[in] a_GCPtrMem The memory address.
1905 * @remarks Will return/long jump on errors.
1906 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1907 */
1908#define IEM_MC_MEM_SEG_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1909 (a_pu128Mem) = iemMemMapDataU128RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1910
1911/**
1912 * Maps guest memory for dqword atomic read+write direct (or bounce) buffer
1913 * access, flat address variant.
1914 *
1915 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
1916 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1917 * @param[in] a_GCPtrMem The memory address.
1918 * @remarks Will return/long jump on errors.
1919 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
1920 */
1921#define IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
1922 (a_pu128Mem) = iemMemFlatMapDataU128AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1923
1924/**
1925 * Maps guest memory for dqword read+write direct (or bounce) buffer access,
1926 * flat address variant.
1927 *
1928 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
1929 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1930 * @param[in] a_GCPtrMem The memory address.
1931 * @remarks Will return/long jump on errors.
1932 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
1933 */
1934#define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
1935 (a_pu128Mem) = iemMemFlatMapDataU128RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1936
1937/**
1938 * Maps guest memory for dqword writeonly direct (or bounce) buffer access,
1939 * flat address variant.
1940 *
1941 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
1942 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1943 * @param[in] a_GCPtrMem The memory address.
1944 * @remarks Will return/long jump on errors.
1945 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1946 */
1947#define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
1948 (a_pu128Mem) = iemMemFlatMapDataU128WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1949
1950/**
1951 * Maps guest memory for dqword readonly direct (or bounce) buffer access, flat
1952 * address variant.
1953 *
1954 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
1955 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1956 * @param[in] a_GCPtrMem The memory address.
1957 * @remarks Will return/long jump on errors.
1958 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
1959 */
1960#define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
1961 (a_pu128Mem) = iemMemFlatMapDataU128RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1962
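/*
 * Usage sketch (illustrative only, not a normative pattern): a read-modify-write
 * user of the dqword mapping macros pairs the map with the matching commit+unmap
 * defined further below.  The IEM_MC_LOCAL and IEM_MC_CALC_RM_EFF_ADDR helpers
 * are assumed from elsewhere in this file, and the actual modification step is
 * elided:
 *
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_LOCAL(RTGCPTR,     GCPtrEffDst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_LOCAL(uint8_t,     bUnmapInfo);
 *      IEM_MC_LOCAL(PRTUINT128U, pu128Dst);
 *      IEM_MC_MEM_SEG_MAP_U128_RW(pu128Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
 *      ... modify the dqword that pu128Dst points at ...
 *      IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo);
 *      IEM_MC_ADVANCE_PC_AND_FINISH();
 *      IEM_MC_END();
 */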
1963
1964/* misc */
1965
1966/**
1967 * Maps guest memory for 80-bit float writeonly direct (or bounce) buffer access.
1968 *
1969 * @param[out] a_pr80Mem Where to return the pointer to the mapping.
1970 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1971 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1972 * @param[in] a_GCPtrMem The memory address.
1973 * @remarks Will return/long jump on errors.
1974 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1975 */
1976#define IEM_MC_MEM_SEG_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
1977 (a_pr80Mem) = iemMemMapDataR80WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
1978
1979/**
1980 * Maps guest memory for 80-bit float writeonly direct (or bounce) buffer access.
1981 *
1982 * @param[out] a_pr80Mem Where to return the pointer to the mapping.
1983 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1984 * @param[in] a_GCPtrMem The memory address.
1985 * @remarks Will return/long jump on errors.
1986 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
1987 */
1988#define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \
1989 (a_pr80Mem) = iemMemFlatMapDataR80WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
1990
1991
1992/**
1993 * Maps guest memory for 80-bit BCD writeonly direct (or bounce) buffer access.
1994 *
1995 * @param[out] a_pd80Mem Where to return the pointer to the mapping.
1996 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
1997 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
1998 * @param[in] a_GCPtrMem The memory address.
1999 * @remarks Will return/long jump on errors.
2000 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2001 */
2002#define IEM_MC_MEM_SEG_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
2003 (a_pd80Mem) = iemMemMapDataD80WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
2004
2005/**
2006 * Maps guest memory for 80-bit BCD writeonly direct (or bounce) buffer access.
2007 *
2008 * @param[out] a_pd80Mem Where to return the pointer to the mapping.
2009 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
2010 * @param[in] a_GCPtrMem The memory address.
2011 * @remarks Will return/long jump on errors.
2012 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
2013 */
2014#define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \
2015 (a_pd80Mem) = iemMemFlatMapDataD80WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
2016
2017
2018
2019/* commit + unmap */
2020
2021/** Commits the memory and unmaps guest memory previously mapped RW.
2022 * @remarks May return.
2023 * @note Implicitly frees the a_bMapInfo variable.
2024 */
2025#define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_bMapInfo) iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo))
2026
2027/** Commits the memory and unmaps guest memory previously mapped ATOMIC.
2028 * @remarks May return.
2029 * @note Implicitly frees the a_bMapInfo variable.
2030 */
2031#define IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(a_bMapInfo) iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo))
2032
2033/** Commits the memory and unmaps guest memory previously mapped W.
2034 * @remarks May return.
2035 * @note Implicitly frees the a_bMapInfo variable.
2036 */
2037#define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_bMapInfo) iemMemCommitAndUnmapWoJmp(pVCpu, (a_bMapInfo))
2038
2039/** Commits the memory and unmaps guest memory previously mapped R.
2040 * @remarks May return.
2041 * @note Implicitly frees the a_bMapInfo variable.
2042 */
2043#define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_bMapInfo) iemMemCommitAndUnmapRoJmp(pVCpu, (a_bMapInfo))
2044
2045
2046/** Commits the memory and unmaps the guest memory, unless the FPU status word
2047 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
2048 * would prevent the store from taking place.
2049 *
2050 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
2051 * store, while \#P will not.
2052 *
2053 * @remarks May in theory return - for now.
2054 * @note Implicitly frees both the a_bMapInfo and a_u16FSW variables.
2055 */
2056#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_bMapInfo, a_u16FSW) do { \
2057 if ( !(a_u16FSW & X86_FSW_ES) \
2058 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
2059 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
2060 iemMemCommitAndUnmapWoJmp(pVCpu, a_bMapInfo); \
2061 else \
2062 iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo); \
2063 } while (0)
2064
2065/** Rolls back (conceptually only, assumes no writes) and unmaps the guest memory.
2066 * @note Implicitly frees the a_bMapInfo variable. */
2067#define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_bMapInfo) iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo)
2068
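/*
 * Usage sketch (illustrative only): an FSTP-style store maps the destination
 * write-only, lets an assembly worker produce the value and the resulting FSW,
 * and then commits with the FPU-store aware variant so that a pending unmasked
 * exception suppresses the store.  The worker step and the locals are assumed:
 *
 *      IEM_MC_MEM_SEG_MAP_R80_WO(pr80Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
 *      ... assembly worker fills in the value behind pr80Dst and u16Fsw ...
 *      IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(bUnmapInfo, u16Fsw);
 *      IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffDst, uFpuOpcode);
 */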
2069
2070
2071/** Calculate the effective address from the ModR/M byte. */
2072#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
2073 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (a_bRm), (a_cbImmAndRspOffset)))
2074
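/*
 * Usage sketch (illustrative only): decoders feed the ModR/M byte straight into
 * the effective-address helper before touching memory:
 *
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *
 * Here bRm is the ModR/M byte already fetched by the decoder, and the trailing 0
 * is understood to mean that no immediate bytes follow the addressing bytes
 * (relevant for RIP-relative addressing).
 */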
2075
2076/** The @a a_fSupportedHosts mask is built from ORed-together RT_ARCH_VAL_XXX values. */
2077#define IEM_MC_NATIVE_IF(a_fSupportedHosts) if (false) {
2078#define IEM_MC_NATIVE_ELSE() } else {
2079#define IEM_MC_NATIVE_ENDIF() } ((void)0)
2080
2081#define IEM_MC_NATIVE_EMIT_0(a_fnEmitter)
2082#define IEM_MC_NATIVE_EMIT_1(a_fnEmitter, a0) (void)(a0)
2083#define IEM_MC_NATIVE_EMIT_2(a_fnEmitter, a0, a1) (void)(a0), (void)(a1)
2084#define IEM_MC_NATIVE_EMIT_2_EX(a_fnEmitter, a0, a1) (void)(a0), (void)(a1)
2085#define IEM_MC_NATIVE_EMIT_3(a_fnEmitter, a0, a1, a2) (void)(a0), (void)(a1), (void)(a2)
2086#define IEM_MC_NATIVE_EMIT_4(a_fnEmitter, a0, a1, a2, a3) (void)(a0), (void)(a1), (void)(a2), (void)(a3)
2087#define IEM_MC_NATIVE_EMIT_5(a_fnEmitter, a0, a1, a2, a3, a4) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4)
2088#define IEM_MC_NATIVE_EMIT_6(a_fnEmitter, a0, a1, a2, a3, a4, a5) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5)
2089#define IEM_MC_NATIVE_EMIT_7(a_fnEmitter, a0, a1, a2, a3, a4, a5, a6) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5), (void)(a6)
2090#define IEM_MC_NATIVE_EMIT_8(a_fnEmitter, a0, a1, a2, a3, a4, a5, a6, a7) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5), (void)(a6), (void)(a7)
2091
2092/** This can be used to direct the register allocator when dealing with
2093 * x86/AMD64 instructions (like SHL reg,CL) that take fixed registers. */
2094#define IEM_MC_NATIVE_SET_AMD64_HOST_REG_FOR_LOCAL(a_VarNm, a_idxHostReg) ((void)0)
2095
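/*
 * Usage sketch (illustrative only; the emitter and worker names are placeholders,
 * not real symbols): on supported hosts the recompiler emits native code, while
 * the interpreter build always takes the ELSE branch:
 *
 *      IEM_MC_NATIVE_IF(RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64)
 *          IEM_MC_NATIVE_EMIT_2(iemNativeEmit_SomeOp, uDst, uSrc);
 *      IEM_MC_NATIVE_ELSE()
 *          IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_SomeOp, puDst, uSrc);
 *      IEM_MC_NATIVE_ENDIF();
 */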
2096
2097#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
2098#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
2099#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
2100#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
2101#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
2102#define IEM_MC_CALL_AIMPL_3(a_rcType, a_rc, a_pfn, a0, a1, a2) a_rcType const a_rc = (a_pfn)((a0), (a1), (a2))
2103#define IEM_MC_CALL_AIMPL_4(a_rcType, a_rc, a_pfn, a0, a1, a2, a3) a_rcType const a_rc = (a_pfn)((a0), (a1), (a2), (a3))
2104
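/*
 * Usage sketch (illustrative only; the worker and variable names are assumed):
 * the value-returning variants also declare the result variable as part of the
 * call, e.g.:
 *
 *      IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pfnWorker, fEFlagsIn, pu64Dst, u64Src);
 *      IEM_MC_COMMIT_EFLAGS(fEFlagsRet);
 *
 * with IEM_MC_COMMIT_EFLAGS coming from elsewhere in this file.
 */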
2105
2106/** @def IEM_MC_CALL_CIMPL_HLP_RET
2107 * Helper macro for checking that all important IEM_CIMPL_F_XXX bits are set.
2108 */
2109#ifdef VBOX_STRICT
2110# define IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, a_CallExpr) \
2111 do { \
2112 uint8_t const cbInstr = IEM_GET_INSTR_LEN(pVCpu); /* may be flushed */ \
2113 uint16_t const uCsBefore = pVCpu->cpum.GstCtx.cs.Sel; \
2114 uint64_t const uRipBefore = pVCpu->cpum.GstCtx.rip; \
2115 uint32_t const fEflBefore = pVCpu->cpum.GstCtx.eflags.u; \
2116 uint32_t const fExecBefore = pVCpu->iem.s.fExec; \
2117 VBOXSTRICTRC const rcStrictHlp = a_CallExpr; \
2118 if (rcStrictHlp == VINF_SUCCESS) \
2119 { \
2120 uint64_t const fRipMask = (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_64BIT ? UINT64_MAX : UINT32_MAX; \
2121 AssertMsg( ((a_fFlags) & IEM_CIMPL_F_BRANCH_ANY) \
2122 || ( ((uRipBefore + cbInstr) & fRipMask) == pVCpu->cpum.GstCtx.rip \
2123 && uCsBefore == pVCpu->cpum.GstCtx.cs.Sel) \
2124 || ( ((a_fFlags) & IEM_CIMPL_F_REP) \
2125 && uRipBefore == pVCpu->cpum.GstCtx.rip \
2126 && uCsBefore == pVCpu->cpum.GstCtx.cs.Sel), \
2127 ("CS:RIP=%04x:%08RX64 + %x -> %04x:%08RX64, expected %04x:%08RX64\n", uCsBefore, uRipBefore, cbInstr, \
2128 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uCsBefore, (uRipBefore + cbInstr) & fRipMask)); \
2129 if ((a_fFlags) & IEM_CIMPL_F_RFLAGS) \
2130 { /* No need to check fEflBefore */ Assert(!((a_fFlags) & IEM_CIMPL_F_STATUS_FLAGS)); } \
2131 else if ((a_fFlags) & IEM_CIMPL_F_STATUS_FLAGS) \
2132 AssertMsg( (pVCpu->cpum.GstCtx.eflags.u & ~(X86_EFL_STATUS_BITS | X86_EFL_RF)) \
2133 == (fEflBefore & ~(X86_EFL_STATUS_BITS | X86_EFL_RF)), \
2134 ("EFL=%#RX32 -> %#RX32\n", fEflBefore, pVCpu->cpum.GstCtx.eflags.u)); \
2135 else \
2136 AssertMsg( (pVCpu->cpum.GstCtx.eflags.u & ~(X86_EFL_RF)) \
2137 == (fEflBefore & ~(X86_EFL_RF)), \
2138 ("EFL=%#RX32 -> %#RX32\n", fEflBefore, pVCpu->cpum.GstCtx.eflags.u)); \
2139 if (!((a_fFlags) & IEM_CIMPL_F_MODE)) \
2140 { \
2141 uint32_t fExecRecalc = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS); \
2142 AssertMsg( fExecBefore == fExecRecalc \
2143                       /* in case ES, DS or SS was external initially (happens a lot with HM): */ \
2144 || ( fExecBefore == (fExecRecalc & ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK) \
2145 && (fExecRecalc & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT), \
2146 ("fExec=%#x -> %#x (diff %#x)\n", fExecBefore, fExecRecalc, fExecBefore ^ fExecRecalc)); \
2147 } \
2148 } \
2149 return rcStrictHlp; \
2150 } while (0)
2151#else
2152# define IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, a_CallExpr) return a_CallExpr
2153#endif
2154
2155/**
2156 * Defers the rest of the instruction emulation to a C implementation routine
2157 * and returns, only taking the standard parameters.
2158 *
2159 * @param a_fFlags IEM_CIMPL_F_XXX.
2160 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2161 * in the native recompiler.
2162 * @param a_pfnCImpl The pointer to the C routine.
2163 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
2164 */
2165#define IEM_MC_CALL_CIMPL_0(a_fFlags, a_fGstShwFlush, a_pfnCImpl) \
2166 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu)))
2167
2168/**
2169 * Defers the rest of instruction emulation to a C implementation routine and
2170 * returns, taking one argument in addition to the standard ones.
2171 *
2172 * @param a_fFlags IEM_CIMPL_F_XXX.
2173 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2174 * in the native recompiler.
2175 * @param a_pfnCImpl The pointer to the C routine.
2176 * @param a0 The argument.
2177 */
2178#define IEM_MC_CALL_CIMPL_1(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
2179 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0))
2180
2181/**
2182 * Defers the rest of the instruction emulation to a C implementation routine
2183 * and returns, taking two arguments in addition to the standard ones.
2184 *
2185 * @param a_fFlags IEM_CIMPL_F_XXX.
2186 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2187 * in the native recompiler.
2188 * @param a_pfnCImpl The pointer to the C routine.
2189 * @param a0 The first extra argument.
2190 * @param a1 The second extra argument.
2191 */
2192#define IEM_MC_CALL_CIMPL_2(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
2193 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1))
2194
2195/**
2196 * Defers the rest of the instruction emulation to a C implementation routine
2197 * and returns, taking three arguments in addition to the standard ones.
2198 *
2199 * @param a_fFlags IEM_CIMPL_F_XXX.
2200 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2201 * in the native recompiler.
2202 * @param a_pfnCImpl The pointer to the C routine.
2203 * @param a0 The first extra argument.
2204 * @param a1 The second extra argument.
2205 * @param a2 The third extra argument.
2206 */
2207#define IEM_MC_CALL_CIMPL_3(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
2208 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2))
2209
2210/**
2211 * Defers the rest of the instruction emulation to a C implementation routine
2212 * and returns, taking four arguments in addition to the standard ones.
2213 *
2214 * @param a_fFlags IEM_CIMPL_F_XXX.
2215 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2216 * in the native recompiler.
2217 * @param a_pfnCImpl The pointer to the C routine.
2218 * @param a0 The first extra argument.
2219 * @param a1 The second extra argument.
2220 * @param a2 The third extra argument.
2221 * @param a3 The fourth extra argument.
2222 */
2223#define IEM_MC_CALL_CIMPL_4(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3) \
2224 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3))
2225
2226/**
2227 * Defers the rest of the instruction emulation to a C implementation routine
2228 * and returns, taking five arguments in addition to the standard ones.
2229 *
2230 * @param a_fFlags IEM_CIMPL_F_XXX.
2231 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2232 * in the native recompiler.
2233 * @param a_pfnCImpl The pointer to the C routine.
2234 * @param a0 The first extra argument.
2235 * @param a1 The second extra argument.
2236 * @param a2 The third extra argument.
2237 * @param a3 The fourth extra argument.
2238 * @param a4 The fifth extra argument.
2239 */
2240#define IEM_MC_CALL_CIMPL_5(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3, a4) \
2241 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4))
2242
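/*
 * Usage sketch (illustrative only; the flag combination and worker name are
 * examples rather than a prescription): the CIMPL call macro returns, so it is
 * the last statement of the MC block:
 *
 *      IEM_MC_CALL_CIMPL_2(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR, 0,
 *                          iemCImpl_SomeFarBranchWorker, uSel, offSeg);
 *
 * where the 0 is a_fGstShwFlush (nothing to flush in this simplified example).
 */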
2243/**
2244 * Defers the entire instruction emulation to a C implementation routine and
2245 * returns, only taking the standard parameters.
2246 *
2247 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
2248 *
2249 * @param a_fFlags IEM_CIMPL_F_XXX.
2250 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2251 * in the native recompiler.
2252 * @param a_pfnCImpl The pointer to the C routine.
2253 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
2254 */
2255#define IEM_MC_DEFER_TO_CIMPL_0_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl) \
2256 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu)))
2257
2258/**
2259 * Defers the entire instruction emulation to a C implementation routine and
2260 * returns, taking one argument in addition to the standard ones.
2261 *
2262 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
2263 *
2264 * @param a_fFlags IEM_CIMPL_F_XXX.
2265 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2266 * in the native recompiler.
2267 * @param a_pfnCImpl The pointer to the C routine.
2268 * @param a0 The argument.
2269 */
2270#define IEM_MC_DEFER_TO_CIMPL_1_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
2271 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0))
2272
2273/**
2274 * Defers the entire instruction emulation to a C implementation routine and
2275 * returns, taking two arguments in addition to the standard ones.
2276 *
2277 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
2278 *
2279 * @param a_fFlags IEM_CIMPL_F_XXX.
2280 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2281 * in the native recompiler.
2282 * @param a_pfnCImpl The pointer to the C routine.
2283 * @param a0 The first extra argument.
2284 * @param a1 The second extra argument.
2285 */
2286#define IEM_MC_DEFER_TO_CIMPL_2_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
2287 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1))
2288
2289/**
2290 * Defers the entire instruction emulation to a C implementation routine and
2291 * returns, taking three arguments in addition to the standard ones.
2292 *
2293 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
2294 *
2295 * @param a_fFlags IEM_CIMPL_F_XXX.
2296 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
2297 * in the native recompiler.
2298 * @param a_pfnCImpl The pointer to the C routine.
2299 * @param a0 The first extra argument.
2300 * @param a1 The second extra argument.
2301 * @param a2 The third extra argument.
2302 */
2303#define IEM_MC_DEFER_TO_CIMPL_3_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
2304 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2))
2305
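/*
 * Usage sketch (illustrative only; the flags and flush mask are simplified):
 * whole-instruction deferral is written directly in the opcode decoder, with no
 * IEM_MC_BEGIN/IEM_MC_END pair around it, e.g. for an instruction implemented
 * entirely in C:
 *
 *      IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_VMEXIT, 0, iemCImpl_cpuid);
 */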
2306
2307/**
2308 * Calls a FPU assembly implementation taking one visible argument.
2309 *
2310 * @param a_pfnAImpl Pointer to the assembly FPU routine.
2311 * @param a0 The first extra argument.
2312 */
2313#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
2314 do { \
2315 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0)); \
2316 } while (0)
2317
2318/**
2319 * Calls a FPU assembly implementation taking two visible arguments.
2320 *
2321 * @param a_pfnAImpl Pointer to the assembly FPU routine.
2322 * @param a0 The first extra argument.
2323 * @param a1 The second extra argument.
2324 */
2325#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
2326 do { \
2327 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
2328 } while (0)
2329
2330/**
2331 * Calls a FPU assembly implementation taking three visible arguments.
2332 *
2333 * @param a_pfnAImpl Pointer to the assembly FPU routine.
2334 * @param a0 The first extra argument.
2335 * @param a1 The second extra argument.
2336 * @param a2 The third extra argument.
2337 */
2338#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
2339 do { \
2340 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
2341 } while (0)
2342
2343#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
2344 do { \
2345 (a_FpuData).FSW = (a_FSW); \
2346 (a_FpuData).r80Result = *(a_pr80Value); \
2347 } while (0)
2348
2349/** Pushes FPU result onto the stack. */
2350#define IEM_MC_PUSH_FPU_RESULT(a_FpuData, a_uFpuOpcode) \
2351 iemFpuPushResult(pVCpu, &a_FpuData, a_uFpuOpcode)
2352/** Pushes FPU result onto the stack and sets the FPUDP. */
2353#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2354 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2355
2356/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
2357#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo, a_uFpuOpcode) \
2358 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo, a_uFpuOpcode)
2359
2360/** Stores FPU result in a stack register. */
2361#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg, a_uFpuOpcode) \
2362 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg, a_uFpuOpcode)
2363/** Stores FPU result in a stack register and pops the stack. */
2364#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg, a_uFpuOpcode) \
2365 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg, a_uFpuOpcode)
2366/** Stores FPU result in a stack register and sets the FPUDP. */
2367#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2368 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2369/** Stores FPU result in a stack register, sets the FPUDP, and pops the
2370 * stack. */
2371#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2372 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2373
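/*
 * Usage sketch (illustrative only; the FpuRes/pFpuRes locals, iStReg and
 * uFpuOpcode are assumed to be set up by the surrounding decoder code): a
 * typical two-operand FPU arithmetic flow combines the call and store macros
 * with the register-test IF macros defined further down:
 *
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *          IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0, uFpuOpcode);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0, uFpuOpcode);
 *      IEM_MC_ENDIF();
 */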
2374/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
2375#define IEM_MC_UPDATE_FPU_OPCODE_IP(a_uFpuOpcode) \
2376 iemFpuUpdateOpcodeAndIp(pVCpu, a_uFpuOpcode)
2377/** Free a stack register (for FFREE and FFREEP). */
2378#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
2379 iemFpuStackFree(pVCpu, a_iStReg)
2380/** Increment the FPU stack pointer. */
2381#define IEM_MC_FPU_STACK_INC_TOP() \
2382 iemFpuStackIncTop(pVCpu)
2383/** Decrement the FPU stack pointer. */
2384#define IEM_MC_FPU_STACK_DEC_TOP() \
2385 iemFpuStackDecTop(pVCpu)
2386
2387/** Updates the FSW, FOP, FPUIP, and FPUCS. */
2388#define IEM_MC_UPDATE_FSW(a_u16FSW, a_uFpuOpcode) \
2389 iemFpuUpdateFSW(pVCpu, a_u16FSW, a_uFpuOpcode)
2390/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
2391#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW, a_uFpuOpcode) \
2392 iemFpuUpdateFSW(pVCpu, a_u16FSW, a_uFpuOpcode)
2393/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
2394#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2395 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2396/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
2397#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW, a_uFpuOpcode) \
2398 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW, a_uFpuOpcode)
2399/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
2400 * stack. */
2401#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2402 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2403/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
2404#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW, a_uFpuOpcode) \
2405 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW, a_uFpuOpcode)
2406
2407/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
2408#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst, a_uFpuOpcode) \
2409 iemFpuStackUnderflow(pVCpu, a_iStDst, a_uFpuOpcode)
2410/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
2411 * stack. */
2412#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst, a_uFpuOpcode) \
2413 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst, a_uFpuOpcode)
2414/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
2415 * FPUDS. */
2416#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2417 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2418/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
2419 * FPUDS. Pops stack. */
2420#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2421 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2422/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
2423 * stack twice. */
2424#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP(a_uFpuOpcode) \
2425 iemFpuStackUnderflowThenPopPop(pVCpu, a_uFpuOpcode)
2426/** Raises a FPU stack underflow exception for an instruction pushing a result
2427 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
2428#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW(a_uFpuOpcode) \
2429 iemFpuStackPushUnderflow(pVCpu, a_uFpuOpcode)
2430/** Raises a FPU stack underflow exception for an instruction pushing a result
2431 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
2432#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO(a_uFpuOpcode) \
2433 iemFpuStackPushUnderflowTwo(pVCpu, a_uFpuOpcode)
2434
2435/** Raises a FPU stack overflow exception as part of a push attempt. Sets
2436 * FPUIP, FPUCS and FOP. */
2437#define IEM_MC_FPU_STACK_PUSH_OVERFLOW(a_uFpuOpcode) \
2438 iemFpuStackPushOverflow(pVCpu, a_uFpuOpcode)
2439/** Raises a FPU stack overflow exception as part of a push attempt. Sets
2440 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
2441#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
2442 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
2443/** Prepares for using the FPU state.
2444 * Ensures that we can use the host FPU in the current context (RC+R0).
2445 * Ensures the guest FPU state in the CPUMCTX is up to date. */
2446#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
2447/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
2448#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
2449/** Actualizes the guest FPU state so it can be accessed and modified. */
2450#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
2451
2452/** Prepares for using the SSE state.
2453 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
2454 * Ensures the guest SSE state in the CPUMCTX is up to date. */
2455#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
2456/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
2457#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
2458/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
2459#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
2460
2461/** Prepares for using the AVX state.
2462 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
2463 * Ensures the guest AVX state in the CPUMCTX is up to date.
2464 * @note This will include the AVX512 state too when support for it is added
2465 * due to the zero-extending feature of VEX instructions. */
2466#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
2467/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
2468#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
2469/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
2470#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
2471
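/*
 * Usage sketch (illustrative only; the raise, fetch/store and ModR/M helper
 * macros are assumed from elsewhere in the IEM sources): preparation or
 * actualization always precedes register/XState access in an MC block:
 *
 *      IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
 *      IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();
 *      IEM_MC_FETCH_XREG_U128(uSrc, IEM_GET_MODRM_RM(pVCpu, bRm));
 *      IEM_MC_STORE_XREG_U128(IEM_GET_MODRM_REG(pVCpu, bRm), uSrc);
 */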
2472/**
2473 * Calls a MMX assembly implementation taking two visible arguments.
2474 *
2475 * @param a_pfnAImpl Pointer to the assembly MMX routine.
2476 * @param a0 The first extra argument.
2477 * @param a1 The second extra argument.
2478 */
2479#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
2480 do { \
2481 IEM_MC_PREPARE_FPU_USAGE(); \
2482 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
2483 } while (0)
2484
2485/**
2486 * Calls a MMX assembly implementation taking three visible arguments.
2487 *
2488 * @param a_pfnAImpl Pointer to the assembly MMX routine.
2489 * @param a0 The first extra argument.
2490 * @param a1 The second extra argument.
2491 * @param a2 The third extra argument.
2492 */
2493#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
2494 do { \
2495 IEM_MC_PREPARE_FPU_USAGE(); \
2496 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
2497 } while (0)
2498
2499
2500/**
2501 * Calls a SSE assembly implementation taking two visible arguments.
2502 *
2503 * @param a_pfnAImpl Pointer to the assembly SSE routine.
2504 * @param a0 The first extra argument.
2505 * @param a1 The second extra argument.
2506 *
2507 * @note This throws an \#XF/\#UD exception if the helper indicates an exception
2508 * which is unmasked in the guest's MXCSR.
2509 */
2510#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
2511 do { \
2512 IEM_MC_PREPARE_SSE_USAGE(); \
2513 const uint32_t fMxcsrOld = pVCpu->cpum.GstCtx.XState.x87.MXCSR; \
2514 const uint32_t fMxcsrNew = a_pfnAImpl(fMxcsrOld & ~X86_MXCSR_XCPT_FLAGS, \
2515 (a0), (a1)); \
2516 pVCpu->cpum.GstCtx.XState.x87.MXCSR |= fMxcsrNew; \
2517 if (RT_LIKELY(( ~((fMxcsrOld & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
2518 & (fMxcsrNew & X86_MXCSR_XCPT_FLAGS)) == 0)) \
2519 { /* probable */ } \
2520 else \
2521 { \
2522 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT) \
2523 return iemRaiseSimdFpException(pVCpu); \
2524 return iemRaiseUndefinedOpcode(pVCpu); \
2525 } \
2526 } while (0)
2527
2528/**
2529 * Calls a SSE assembly implementation taking three visible arguments.
2530 *
2531 * @param a_pfnAImpl Pointer to the assembly SSE routine.
2532 * @param a0 The first extra argument.
2533 * @param a1 The second extra argument.
2534 * @param a2 The third extra argument.
2535 *
2536 * @note This throws an \#XF/\#UD exception if the helper indicates an exception
2537 * which is unmasked in the guest's MXCSR.
2538 */
2539#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
2540 do { \
2541 IEM_MC_PREPARE_SSE_USAGE(); \
2542 const uint32_t fMxcsrOld = pVCpu->cpum.GstCtx.XState.x87.MXCSR; \
2543 const uint32_t fMxcsrNew = a_pfnAImpl(fMxcsrOld & ~X86_MXCSR_XCPT_FLAGS, \
2544 (a0), (a1), (a2)); \
2545 pVCpu->cpum.GstCtx.XState.x87.MXCSR |= fMxcsrNew; \
2546 if (RT_LIKELY(( ~((fMxcsrOld & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
2547 & (fMxcsrNew & X86_MXCSR_XCPT_FLAGS)) == 0)) \
2548 { /* probable */ } \
2549 else \
2550 { \
2551 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT) \
2552 return iemRaiseSimdFpException(pVCpu); \
2553 return iemRaiseUndefinedOpcode(pVCpu); \
2554 } \
2555 } while (0)
2556
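/*
 * Worked example for the unmasked-exception test above (assuming the usual
 * MXCSR layout: exception flags in bits 0..5, the corresponding mask bits in
 * bits 7..12, and X86_MXCSR_XCPT_MASK_SHIFT being 7): suppose only the
 * precision exception is unmasked, i.e. PM is zero and the other mask bits are
 * one.  Then (fMxcsrOld & X86_MXCSR_XCPT_MASK) >> 7 yields 011111b, its
 * complement selects bit 5 (the PE position), and if the worker returns
 * fMxcsrNew with PE (100000b) set the AND is non-zero, so the block raises
 * \#XF when X86_CR4_OSXMMEEXCPT is set in CR4 and \#UD otherwise.
 */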
2557
2558/**
2559 * Calls an AVX assembly implementation taking two visible arguments.
2560 *
2561 * There is one implicit zeroth argument: the guest MXCSR value (with the exception flags masked off).
2562 *
2563 * @param a_pfnAImpl Pointer to the assembly AVX routine.
2564 * @param a0 The first extra argument.
2565 * @param a1 The second extra argument.
2566 *
2567 * @note This throws an \#XF/\#UD exception if the helper indicates an exception
2568 * which is unmasked in the guest's MXCSR.
2569 */
2570#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a0, a1) \
2571 do { \
2572 IEM_MC_PREPARE_AVX_USAGE(); \
2573 const uint32_t fMxcsrOld = pVCpu->cpum.GstCtx.XState.x87.MXCSR; \
2574 const uint32_t fMxcsrNew = a_pfnAImpl(fMxcsrOld & ~X86_MXCSR_XCPT_FLAGS, \
2575 (a0), (a1)); \
2576 pVCpu->cpum.GstCtx.XState.x87.MXCSR |= fMxcsrNew; \
2577 if (RT_LIKELY(( ~((fMxcsrOld & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
2578 & (fMxcsrNew & X86_MXCSR_XCPT_FLAGS)) == 0)) \
2579 { /* probable */ } \
2580 else \
2581 { \
2582 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT) \
2583 return iemRaiseSimdFpException(pVCpu); \
2584 return iemRaiseUndefinedOpcode(pVCpu); \
2585 } \
2586 } while (0)
2587
2588/**
2589 * Calls an AVX assembly implementation taking three visible arguments.
2590 *
2591 * There is one implicit zeroth argument: the guest MXCSR value (with the exception flags masked off).
2592 *
2593 * @param a_pfnAImpl Pointer to the assembly AVX routine.
2594 * @param a0 The first extra argument.
2595 * @param a1 The second extra argument.
2596 * @param a2 The third extra argument.
2597 *
2598 * @note This throws an \#XF/\#UD exception if the helper indicates an exception
2599 * which is unmasked in the guest's MXCSR.
2600 */
2601#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
2602 do { \
2603 IEM_MC_PREPARE_AVX_USAGE(); \
2604 const uint32_t fMxcsrOld = pVCpu->cpum.GstCtx.XState.x87.MXCSR; \
2605 const uint32_t fMxcsrNew = a_pfnAImpl(fMxcsrOld & ~X86_MXCSR_XCPT_FLAGS, \
2606 (a0), (a1), (a2)); \
2607 pVCpu->cpum.GstCtx.XState.x87.MXCSR |= fMxcsrNew; \
2608 if (RT_LIKELY(( ~((fMxcsrOld & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
2609 & (fMxcsrNew & X86_MXCSR_XCPT_FLAGS)) == 0)) \
2610 { /* probable */ } \
2611 else \
2612 { \
2613 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT) \
2614 return iemRaiseSimdFpException(pVCpu); \
2615 return iemRaiseUndefinedOpcode(pVCpu); \
2616 } \
2617 } while (0)
2618
2619/** @note Not for IOPL or IF testing. */
2620#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
2621/** @note Not for IOPL or IF testing. */
2622#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
2623/** @note Not for IOPL or IF testing. */
2624#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
2625/** @note Not for IOPL or IF testing. */
2626#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
2627/** @note Not for IOPL or IF testing. */
2628#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
2629 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
2630 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
2631/** @note Not for IOPL or IF testing. */
2632#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
2633 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
2634 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
2635/** @note Not for IOPL or IF testing. */
2636#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
2637 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
2638 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
2639 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
2640/** @note Not for IOPL or IF testing. */
2641#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
2642 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
2643 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
2644 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
2645#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
2646#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
2647#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
2648#define IEM_MC_IF_CX_IS_NOT_ONE() if (pVCpu->cpum.GstCtx.cx != 1) {
2649#define IEM_MC_IF_ECX_IS_NOT_ONE() if (pVCpu->cpum.GstCtx.ecx != 1) {
2650#define IEM_MC_IF_RCX_IS_NOT_ONE() if (pVCpu->cpum.GstCtx.rcx != 1) {
2651/** @note Not for IOPL or IF testing. */
2652#define IEM_MC_IF_CX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
2653 if ( pVCpu->cpum.GstCtx.cx != 1 \
2654 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
2655/** @note Not for IOPL or IF testing. */
2656#define IEM_MC_IF_ECX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
2657 if ( pVCpu->cpum.GstCtx.ecx != 1 \
2658 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
2659/** @note Not for IOPL or IF testing. */
2660#define IEM_MC_IF_RCX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
2661 if ( pVCpu->cpum.GstCtx.rcx != 1 \
2662 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
2663/** @note Not for IOPL or IF testing. */
2664#define IEM_MC_IF_CX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
2665 if ( pVCpu->cpum.GstCtx.cx != 1 \
2666 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
2667/** @note Not for IOPL or IF testing. */
2668#define IEM_MC_IF_ECX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
2669 if ( pVCpu->cpum.GstCtx.ecx != 1 \
2670 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
2671/** @note Not for IOPL or IF testing. */
2672#define IEM_MC_IF_RCX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
2673 if ( pVCpu->cpum.GstCtx.rcx != 1 \
2674 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
2675#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
2676#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
2677
2678#define IEM_MC_REF_FPUREG(a_pr80Dst, a_iSt) \
2679 do { (a_pr80Dst) = &pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80; } while (0)
2680#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
2681 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
2682#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
2683 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
2684#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
2685 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
2686#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
2687 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
2688#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
2689 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
2690#define IEM_MC_IF_FCW_IM() \
2691 if (pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_IM) {
2692
2693#define IEM_MC_ELSE() } else {
2694#define IEM_MC_ENDIF() } do {} while (0)
2695
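/*
 * Usage sketch (illustrative only): the IF/ELSE/ENDIF macros above carry their
 * own braces, so a conditional-jump body reads as straight-line code (the jump
 * and advance macros are defined near the top of this file, and i8Imm is the
 * displacement fetched by the decoder):
 *
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_REL_JMP_S8_AND_FINISH(i8Imm);
 *      IEM_MC_ELSE()
 *          IEM_MC_ADVANCE_PC_AND_FINISH();
 *      IEM_MC_ENDIF();
 *      IEM_MC_END();
 */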
2696
2697/** Recompiler debugging: Flush guest register shadow copies. */
2698#define IEM_MC_HINT_FLUSH_GUEST_SHADOW(g_fGstShwFlush) ((void)0)
2699
2700/** Recompiler liveness info: input GPR */
2701#define IEM_MC_LIVENESS_GREG_INPUT(a_iGReg) ((void)0)
2702/** Recompiler liveness info: clobbered GPR */
2703#define IEM_MC_LIVENESS_GREG_CLOBBER(a_iGReg) ((void)0)
2704/** Recompiler liveness info: modified GPR register (i.e. input & output) */
2705#define IEM_MC_LIVENESS_GREG_MODIFY(a_iGReg) ((void)0)
2706
2707/** Recompiler liveness info: input MM register */
2708#define IEM_MC_LIVENESS_MREG_INPUT(a_iMReg) ((void)0)
2709/** Recompiler liveness info: clobbered MM register */
2710#define IEM_MC_LIVENESS_MREG_CLOBBER(a_iMReg) ((void)0)
2711/** Recompiler liveness info: modified MM register (i.e. input & output) */
2712#define IEM_MC_LIVENESS_MREG_MODIFY(a_iMReg) ((void)0)
2713
2714/** Recompiler liveness info: input SSE register */
2715#define IEM_MC_LIVENESS_XREG_INPUT(a_iXReg) ((void)0)
2716/** Recompiler liveness info: clobbered SSE register */
2717#define IEM_MC_LIVENESS_XREG_CLOBBER(a_iXReg) ((void)0)
2718/** Recompiler liveness info: modified SSE register (i.e. input & output) */
2719#define IEM_MC_LIVENESS_XREG_MODIFY(a_iXReg) ((void)0)
2720
2721/** Recompiler liveness info: input MXCSR */
2722#define IEM_MC_LIVENESS_MXCSR_INPUT() ((void)0)
2723/** Recompiler liveness info: clobbered MXCSR */
2724#define IEM_MC_LIVENESS_MXCSR_CLOBBER() ((void)0)
2725/** Recompiler liveness info: modified MXCSR (i.e. input & output) */
2726#define IEM_MC_LIVENESS_MXCSR_MODIFY() ((void)0)
2727
2728/** @todo add more as needed. */
2729
2730/** @} */
2731
2732#endif /* !VMM_INCLUDED_SRC_include_IEMMc_h */
2733