VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMMc.h@96751

Last change on this file was 96751, checked in by vboxsync, 2 years ago

VMM/IEM: Implement [v]comiss/[v]ucomiss/[v]comisd/[v]ucomisd instructions, bugref:9898 [Use already existing IEM_MC_COMMIT_EFLAGS() instead of introducing another one]

/* $Id: IEMMc.h 96751 2022-09-15 18:14:30Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - IEM_MC_XXX.
 */

/*
 * Copyright (C) 2011-2022 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#ifndef VMM_INCLUDED_SRC_include_IEMMc_h
#define VMM_INCLUDED_SRC_include_IEMMc_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif


/** @name "Microcode" macros.
 *
 * The idea is that we should be able to use the same code to interpret
 * instructions as well as to recompile them. Thus this obfuscation.
 *
 * @{
 */
#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
#define IEM_MC_END() }
#define IEM_MC_PAUSE() do {} while (0)
#define IEM_MC_CONTINUE() do {} while (0)

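/* A hedged, illustrative sketch (not part of this file) of how an instruction
 * body is written once in these IEM_MC_XXX macros and then expanded either by
 * the interpreter or by the recompiler.  The INC AX semantics and the
 * IEM_MC_CALL_VOID_AIMPL_2 / iemAImpl_inc_u16 worker names are assumptions
 * made purely for the example:
 *
 *     IEM_MC_BEGIN(2, 0);
 *     IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 *     IEM_MC_ARG(uint32_t *, pEFlags, 1);
 *     IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
 *     IEM_MC_REF_EFLAGS(pEFlags);
 *     IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_inc_u16, pu16Dst, pEFlags);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */
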
/** Internal macro. */
#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = a_Expr; \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)


#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
    do { \
        if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
            return iemRaiseDeviceNotAvailable(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
    do { \
        if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
            return iemRaiseDeviceNotAvailable(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
    do { \
        if (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES) \
            return iemRaiseMathFault(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
    do { \
        if (   (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
            || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
            || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
            return iemRaiseUndefinedOpcode(pVCpu); \
        if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
            return iemRaiseDeviceNotAvailable(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
    do { \
        if (   (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
            || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
            || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
            return iemRaiseUndefinedOpcode(pVCpu); \
        if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
            return iemRaiseDeviceNotAvailable(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_SSE42_RELATED_XCPT() \
    do { \
        if (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
            || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
            || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse42) \
            return iemRaiseUndefinedOpcode(pVCpu); \
        if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
            return iemRaiseDeviceNotAvailable(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
    do { \
        if (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
            || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
            || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
            return iemRaiseUndefinedOpcode(pVCpu); \
        if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
            return iemRaiseDeviceNotAvailable(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_SSSE3_RELATED_XCPT() \
    do { \
        if (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
            || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
            || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSsse3) \
            return iemRaiseUndefinedOpcode(pVCpu); \
        if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
            return iemRaiseDeviceNotAvailable(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
    do { \
        if (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
            || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
            || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
            return iemRaiseUndefinedOpcode(pVCpu); \
        if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
            return iemRaiseDeviceNotAvailable(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
    do { \
        if (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
            || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
            || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
            return iemRaiseUndefinedOpcode(pVCpu); \
        if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
            return iemRaiseDeviceNotAvailable(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
    do { \
        if (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
            || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
            || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
            return iemRaiseUndefinedOpcode(pVCpu); \
        if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
            return iemRaiseDeviceNotAvailable(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
    do { \
        if (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
            || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
            return iemRaiseUndefinedOpcode(pVCpu); \
        if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
            return iemRaiseDeviceNotAvailable(pVCpu); \
        if (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES) \
            return iemRaiseMathFault(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_EX(a_fSupported) \
    do { \
        if (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
            || !(a_fSupported)) \
            return iemRaiseUndefinedOpcode(pVCpu); \
        if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
            return iemRaiseDeviceNotAvailable(pVCpu); \
        if (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES) \
            return iemRaiseMathFault(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
    do { \
        if (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
            || (   !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
                && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
            return iemRaiseUndefinedOpcode(pVCpu); \
        if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
            return iemRaiseDeviceNotAvailable(pVCpu); \
        if (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES) \
            return iemRaiseMathFault(pVCpu); \
    } while (0)
#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
    do { \
        if (pVCpu->iem.s.uCpl != 0) \
            return iemRaiseGeneralProtectionFault0(pVCpu); \
    } while (0)
#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
    do { \
        if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
        else return iemRaiseGeneralProtectionFault0(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
    do { \
        if (   pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
            || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
            || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
            return iemRaiseUndefinedOpcode(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
    do { \
        if (!IEM_IS_CANONICAL(a_u64Addr)) \
            return iemRaiseGeneralProtectionFault0(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_SSE_AVX_SIMD_FP_OR_UD_XCPT() \
    do { \
        if ((  ~((pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
             & (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_FLAGS)) != 0) \
        { \
            if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT) \
                return iemRaiseSimdFpException(pVCpu); \
            else \
                return iemRaiseUndefinedOpcode(pVCpu); \
        } \
    } while (0)
#define IEM_MC_RAISE_SSE_AVX_SIMD_FP_OR_UD_XCPT() \
    do { \
        if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT) \
            return iemRaiseSimdFpException(pVCpu); \
        else \
            return iemRaiseUndefinedOpcode(pVCpu); \
    } while (0)
#define IEM_MC_MAYBE_RAISE_PCLMUL_RELATED_XCPT() \
    do { \
        if (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
            || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
            || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fPclMul) \
            return iemRaiseUndefinedOpcode(pVCpu); \
        if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
            return iemRaiseDeviceNotAvailable(pVCpu); \
    } while (0)

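/* Hedged usage sketch (not from this file): the raise-check macros above go
 * at the very top of an instruction body, before any register or memory
 * access, so that the #UD/#NM/#MF priorities match real hardware; the SSE2
 * case is picked arbitrarily for the example:
 *
 *     IEM_MC_BEGIN(0, 2);
 *     IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
 *     ... fetch operands, call the worker, store the result ...
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */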

#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
    uint32_t a_Name; \
    uint32_t *a_pName = &a_Name
#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
    do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)

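/* Hedged sketch (not from this file) of the usual EFLAGS round-trip that
 * IEM_MC_COMMIT_EFLAGS() completes; the worker call in the middle is an
 * assumption for the example:
 *
 *     IEM_MC_LOCAL(uint32_t, fEFlags);
 *     IEM_MC_FETCH_EFLAGS(fEFlags);
 *     ... the worker updates fEFlags ...
 *     IEM_MC_COMMIT_EFLAGS(fEFlags);
 */
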
#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN

#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
        IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
        (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
    } while (0)
#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
        IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
        (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
    } while (0)
#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
        IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
        (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
    } while (0)
/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
        IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
        (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
    } while (0)
#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
        IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
        (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
    } while (0)
/** @note Not for IOPL or IF testing or modification. */
#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.XState.x87.FSW
#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.XState.x87.FCW

#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
#define IEM_MC_STORE_GREG_I32(a_iGReg, a_i32Value) *iemGRegRefI64(pVCpu, (a_iGReg)) = (int64_t)(a_i32Value) /* Sign extension. */
#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
#define IEM_MC_STORE_GREG_I64(a_iGReg, a_i64Value) *iemGRegRefI64(pVCpu, (a_iGReg)) = (a_i64Value)
#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
        IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
        *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
    } while (0)
#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
        IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
        *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
    } while (0)
#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
    do { pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)


#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
 *        Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_I32(a_pi32Dst, a_iGReg) (a_pi32Dst) = (int32_t *)iemGRegRefU32(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_I32_CONST(a_pi32Dst, a_iGReg) (a_pi32Dst) = (int32_t const *)iemGRegRefU32(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_I64(a_pi64Dst, a_iGReg) (a_pi64Dst) = (int64_t *)iemGRegRefU64(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_I64_CONST(a_pi64Dst, a_iGReg) (a_pi64Dst) = (int64_t const *)iemGRegRefU64(pVCpu, (a_iGReg))
/** @note Not for IOPL or IF testing or modification. */
#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
#define IEM_MC_REF_MXCSR(a_pfMxcsr) (a_pfMxcsr) = &pVCpu->cpum.GstCtx.XState.x87.MXCSR

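/* Hedged sketch (not from this file) of the pattern the IEM_MC_REF_GREG_U32
 * note above calls for: after writing a 32-bit register through a reference,
 * the caller must clear bits 63:32 itself; iGReg stands in for a real
 * register index:
 *
 *     uint32_t *pu32Dst;
 *     IEM_MC_REF_GREG_U32(pu32Dst, iGReg);
 *     ... the worker updates *pu32Dst ...
 *     IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
 */
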
#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
    do { \
        uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
        *pu32Reg += (a_u32Value); \
        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
    } while (0)
#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)

#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
    do { \
        uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
        *pu32Reg -= (a_u32Value); \
        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
    } while (0)
#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)

#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)

#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)

#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)

#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)

#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)

#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)

#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)

#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)

#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
    do { \
        uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
        *pu32Reg &= (a_u32Value); \
        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
    } while (0)
#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)

#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
    do { \
        uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
        *pu32Reg |= (a_u32Value); \
        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
    } while (0)
#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)


/** @note Not for IOPL or IF modification. */
#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
/** @note Not for IOPL or IF modification. */
#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
/** @note Not for IOPL or IF modification. */
#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)

#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.XState.x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)

/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0xff, i.e. all registers valid). */
#define IEM_MC_FPU_TO_MMX_MODE() do { \
        pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
        pVCpu->cpum.GstCtx.XState.x87.FTW = 0xff; \
    } while (0)

/** Switches the FPU state out of MMX mode (FTW=0, i.e. all registers empty). */
#define IEM_MC_FPU_FROM_MMX_MODE() do { \
        pVCpu->cpum.GstCtx.XState.x87.FTW = 0; \
    } while (0)

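/* Hedged sketch (not from this file): an MMX instruction body typically
 * performs its exception checks first and then switches the FPU into MMX
 * mode before touching the MM registers:
 *
 *     IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
 *     IEM_MC_FPU_TO_MMX_MODE();
 *     IEM_MC_STORE_MREG_U64(iMReg, u64Value);
 */
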
#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
    do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx; } while (0)
#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
    do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[0]; } while (0)
#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
        pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
        pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
    } while (0)
#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
        pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
        pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
    } while (0)
#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
    (a_pu64Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
    (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
    (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
#define IEM_MC_MODIFIED_MREG(a_iMReg) \
    do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; } while (0)
#define IEM_MC_MODIFIED_MREG_BY_REF(a_pu64Dst) \
    do { ((uint32_t *)(a_pu64Dst))[2] = 0xffff; } while (0)

#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
    do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
         (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
    } while (0)
#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
    do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; } while (0)
#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
    do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0]; } while (0)
#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
    do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; } while (0)
#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
    } while (0)
#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
    } while (0)
#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
#define IEM_MC_STORE_XREG_R32(a_iXReg, a_r32Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[0] = (a_r32Value); } while (0)
#define IEM_MC_STORE_XREG_R64(a_iXReg, a_r64Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[0] = (a_r64Value); } while (0)
#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
    } while (0)
#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
    (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
    (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
#define IEM_MC_REF_XREG_XMM_CONST(a_pXmmDst, a_iXReg) \
    (a_pXmmDst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)])
#define IEM_MC_REF_XREG_U32_CONST(a_pu32Dst, a_iXReg) \
    (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0])
#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
    (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0])
#define IEM_MC_REF_XREG_R32_CONST(a_pr32Dst, a_iXReg) \
    (a_pr32Dst) = ((RTFLOAT32U const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[0])
#define IEM_MC_REF_XREG_R64_CONST(a_pr64Dst, a_iXReg) \
    (a_pr64Dst) = ((RTFLOAT64U const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[0])
#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
    do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[0] \
            = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[1] \
            = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[1]; \
    } while (0)

#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
    do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
         (a_u32Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au32[0]; \
    } while (0)
#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
    do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
         (a_u64Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
    } while (0)
#define IEM_MC_FETCH_YREG_2ND_U64(a_u64Dst, a_iYRegSrc) \
    do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
         (a_u64Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
    } while (0)
#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
    do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
         (a_u128Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
         (a_u128Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
    } while (0)
#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
    do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
         (a_u256Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
         (a_u256Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
         (a_u256Dst).au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
         (a_u256Dst).au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
    } while (0)

#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = 0; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)

#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
    (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
    (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
    (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].au64[0])
#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
    do { uintptr_t const iYRegTmp = (a_iYReg); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegTmp); \
    } while (0)

#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)

#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
         uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc32Tmp].au32[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au32[1]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
         uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovlhps */ \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
         uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[0]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
         uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[1]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX(a_iYRegDst, a_iYRegSrcHx, a_u64Local) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[0]; \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u64Local); \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)
#define IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
    do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
         uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
         pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
         IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    } while (0)

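/* The memory accessors below come in two flavours: without IEM_WITH_SETJMP
 * they call workers returning a VBOXSTRICTRC and bail out via
 * IEM_MC_RETURN_ON_FAILURE(); with it they call the longjmp-style *Jmp
 * workers, so the whole expansion can be a plain expression. */
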
664#ifndef IEM_WITH_SETJMP
665# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
666 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
667# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
668 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
669# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
670 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
671#else
672# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
673 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
674# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
675 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
676# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
677 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
678#endif
679
680#ifndef IEM_WITH_SETJMP
681# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
682 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
683# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
684 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
685# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
686 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
687#else
688# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
689 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
690# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
691 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
692# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
693 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
694#endif
695
696#ifndef IEM_WITH_SETJMP
697# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
698 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
699# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
700 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
701# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
702 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
703#else
704# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
705 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
706# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
707 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
708# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
709 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
710#endif
711
712#ifdef SOME_UNUSED_FUNCTION
713# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
714 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
715#endif
716
717#ifndef IEM_WITH_SETJMP
718# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
719 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
720# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
721 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
722# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
723 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
724# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
725 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
726#else
727# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
728 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
729# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
730 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
731# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
732 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
733# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
734 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
735#endif
736
737#ifndef IEM_WITH_SETJMP
738# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
739 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u, (a_iSeg), (a_GCPtrMem)))
740# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
741 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).u, (a_iSeg), (a_GCPtrMem)))
742# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
743 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
744# define IEM_MC_FETCH_MEM_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \
745 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataD80(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem)))
746#else
747# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
748 ((a_r32Dst).u = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
749# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
750 ((a_r64Dst).u = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
751# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
752 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
753# define IEM_MC_FETCH_MEM_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \
754 iemMemFetchDataD80Jmp(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem))
755#endif
756
757#ifndef IEM_WITH_SETJMP
758# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
759 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
760# define IEM_MC_FETCH_MEM_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \
761 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
762# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
763 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
764
765# define IEM_MC_FETCH_MEM_XMM(a_XmmDst, a_iSeg, a_GCPtrMem) \
766 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)))
767# define IEM_MC_FETCH_MEM_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \
768 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)))
769# define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \
770 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)))
771# define IEM_MC_FETCH_MEM_XMM_U32(a_XmmDst, a_iDWord, a_iSeg, a_GCPtrMem) \
772 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_XmmDst).au32[(a_iDWord)], (a_iSeg), (a_GCPtrMem)))
773# define IEM_MC_FETCH_MEM_XMM_U64(a_XmmDst, a_iQWord, a_iSeg, a_GCPtrMem) \
774 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_XmmDst).au64[(a_iQWord)], (a_iSeg), (a_GCPtrMem)))
775#else
776# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
777 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
778# define IEM_MC_FETCH_MEM_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \
779 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
780# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
781 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
782
783# define IEM_MC_FETCH_MEM_XMM(a_XmmDst, a_iSeg, a_GCPtrMem) \
784 iemMemFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
785# define IEM_MC_FETCH_MEM_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \
786 iemMemFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
787# define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \
788 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
789# define IEM_MC_FETCH_MEM_XMM_U32(a_XmmDst, a_iDWord, a_iSeg, a_GCPtrMem) \
790 (a_XmmDst).au32[(a_iDWord)] = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))
791# define IEM_MC_FETCH_MEM_XMM_U64(a_XmmDst, a_iQWord, a_iSeg, a_GCPtrMem) \
792 (a_XmmDst).au64[(a_iQWord)] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))
793#endif
794
795#ifndef IEM_WITH_SETJMP
796# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
797 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
798# define IEM_MC_FETCH_MEM_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \
799 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
800# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
801 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
802
803# define IEM_MC_FETCH_MEM_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \
804 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
805# define IEM_MC_FETCH_MEM_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \
806 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
807# define IEM_MC_FETCH_MEM_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \
808 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
809#else
810# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
811 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
812# define IEM_MC_FETCH_MEM_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \
813 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
814# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
815 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
816
817# define IEM_MC_FETCH_MEM_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \
818 iemMemFetchDataU256Jmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
819# define IEM_MC_FETCH_MEM_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \
820 iemMemFetchDataU256Jmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
821# define IEM_MC_FETCH_MEM_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \
822 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
823#endif
824
825
826
827#ifndef IEM_WITH_SETJMP
828# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
829 do { \
830 uint8_t u8Tmp; \
831 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
832 (a_u16Dst) = u8Tmp; \
833 } while (0)
834# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
835 do { \
836 uint8_t u8Tmp; \
837 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
838 (a_u32Dst) = u8Tmp; \
839 } while (0)
840# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
841 do { \
842 uint8_t u8Tmp; \
843 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
844 (a_u64Dst) = u8Tmp; \
845 } while (0)
846# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
847 do { \
848 uint16_t u16Tmp; \
849 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
850 (a_u32Dst) = u16Tmp; \
851 } while (0)
852# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
853 do { \
854 uint16_t u16Tmp; \
855 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
856 (a_u64Dst) = u16Tmp; \
857 } while (0)
858# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
859 do { \
860 uint32_t u32Tmp; \
861 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
862 (a_u64Dst) = u32Tmp; \
863 } while (0)
864#else /* IEM_WITH_SETJMP */
865# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
866 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
867# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
868 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
869# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
870 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
871# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
872 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
873# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
874 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
875# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
876 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
877#endif /* IEM_WITH_SETJMP */
878
879#ifndef IEM_WITH_SETJMP
880# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
881 do { \
882 uint8_t u8Tmp; \
883 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
884 (a_u16Dst) = (int8_t)u8Tmp; \
885 } while (0)
886# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
887 do { \
888 uint8_t u8Tmp; \
889 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
890 (a_u32Dst) = (int8_t)u8Tmp; \
891 } while (0)
892# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
893 do { \
894 uint8_t u8Tmp; \
895 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
896 (a_u64Dst) = (int8_t)u8Tmp; \
897 } while (0)
898# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
899 do { \
900 uint16_t u16Tmp; \
901 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
902 (a_u32Dst) = (int16_t)u16Tmp; \
903 } while (0)
904# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
905 do { \
906 uint16_t u16Tmp; \
907 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
908 (a_u64Dst) = (int16_t)u16Tmp; \
909 } while (0)
910# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
911 do { \
912 uint32_t u32Tmp; \
913 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
914 (a_u64Dst) = (int32_t)u32Tmp; \
915 } while (0)
916#else /* IEM_WITH_SETJMP */
917# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
918 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
919# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
920 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
921# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
922 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
923# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
924 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
925# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
926 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
927# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
928 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
929#endif /* IEM_WITH_SETJMP */
930
931#ifndef IEM_WITH_SETJMP
932# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
933 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
934# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
935 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
936# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
937 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
938# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
939 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
940#else
941# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
942 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
943# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
944 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
945# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
946 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
947# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
948 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
949#endif
950
951#ifndef IEM_WITH_SETJMP
952# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
953 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
954# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
955 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
956# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
957 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
958# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
959 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
960#else
961# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
962 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
963# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
964 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
965# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
966 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
967# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
968 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
969#endif
970
971#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
972#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
973#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
974#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
975#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u = UINT32_C(0xffc00000)
976#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->u = UINT64_C(0xfff8000000000000)
977#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
978 do { \
979 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
980 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
981 } while (0)
982#define IEM_MC_STORE_MEM_INDEF_D80_BY_REF(a_pd80Dst) \
983 do { \
984 (a_pd80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
985 (a_pd80Dst)->au16[4] = UINT16_C(0xffff); \
986 } while (0)
987
988#ifndef IEM_WITH_SETJMP
989# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
990 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
991# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
992 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
993#else
994# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
995 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
996# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
997 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
998#endif
999
1000#ifndef IEM_WITH_SETJMP
1001# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
1002 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
1003# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
1004 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
1005#else
1006# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
1007 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
1008# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
1009 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
1010#endif
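
/* Illustrative usage sketch: storing an XMM register with SSE alignment
 * checking (movaps-to-memory flavour); IEM_MC_FETCH_XREG_U128,
 * IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT and IEM_GET_MODRM_REG are assumed
 * from elsewhere in this file and the opcode helpers.
 * @code
 *   IEM_MC_BEGIN(0, 2);
 *   IEM_MC_LOCAL(RTUINT128U, u128Tmp);
 *   IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *   IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *   IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
 *   IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ();
 *   IEM_MC_FETCH_XREG_U128(u128Tmp, IEM_GET_MODRM_REG(pVCpu, bRm));
 *   IEM_MC_STORE_MEM_U128_ALIGN_SSE(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u128Tmp);
 *   IEM_MC_ADVANCE_RIP();
 *   IEM_MC_END();
 * @endcode
 */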
1011
1012
1013#define IEM_MC_PUSH_U16(a_u16Value) \
1014 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
1015#define IEM_MC_PUSH_U32(a_u32Value) \
1016 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
1017#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
1018 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
1019#define IEM_MC_PUSH_U64(a_u64Value) \
1020 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
1021
1022#define IEM_MC_POP_U16(a_pu16Value) \
1023 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
1024#define IEM_MC_POP_U32(a_pu32Value) \
1025 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
1026#define IEM_MC_POP_U64(a_pu64Value) \
1027 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
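
/* Illustrative usage sketch: a PUSH r16 style body; the stack helpers above
 * return on #SS/#PF via IEM_MC_RETURN_ON_FAILURE. IEM_MC_FETCH_GREG_U16 is
 * defined earlier in this file, and iReg stands for the register index
 * picked out during decoding.
 * @code
 *   IEM_MC_BEGIN(0, 1);
 *   IEM_MC_LOCAL(uint16_t, u16Value);
 *   IEM_MC_FETCH_GREG_U16(u16Value, iReg);
 *   IEM_MC_PUSH_U16(u16Value);
 *   IEM_MC_ADVANCE_RIP();
 *   IEM_MC_END();
 * @endcode
 */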
1028
1029/** Maps guest memory for direct or bounce buffered access.
1030 * The purpose is to pass the mapping to an operand implementation, thus the @a a_iArg parameter.
1031 * @remarks May return.
1032 */
1033#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
1034 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), \
1035 (a_GCPtrMem), (a_fAccess), sizeof(*(a_pMem)) - 1))
1036
1037/** Maps guest memory for direct or bounce buffered access.
1038 * The purpose is to pass the mapping to an operand implementation, thus the @a a_iArg parameter.
1039 * @remarks May return.
1040 */
1041#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_cbAlign, a_iArg) \
1042 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), \
1043 (a_GCPtrMem), (a_fAccess), (a_cbAlign)))
1044
1045/** Commits the memory and unmaps the guest memory.
1046 * @remarks May return.
1047 */
1048#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
1049 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
1050
1051/** Commits the memory and unmaps the guest memory unless the FPU status word
1052 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
1053 * that would cause FLD not to store.
1054 *
1055 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
1056 * store, while \#P will not.
1057 *
1058 * @remarks May in theory return - for now.
1059 */
1060#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
1061 do { \
1062 if ( !(a_u16FSW & X86_FSW_ES) \
1063 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
1064 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
1065 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
1066 } while (0)
1067
1068/** Calculate effective address from R/M. */
1069#ifndef IEM_WITH_SETJMP
1070# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
1071 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
1072#else
1073# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
1074 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
1075#endif
1076
1077#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
1078#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
1079#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
1080#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
1081#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
1082#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
1083#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
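
/* Illustrative usage sketch: a read-modify-write ADD r/m16,r16 style body
 * tying the mapping, worker call and commit together. IEM_MC_ARG,
 * IEM_MC_ARG_LOCAL_EFLAGS and IEM_MC_COMMIT_EFLAGS are defined earlier in
 * this file; iemAImpl_add_u16 names one of the assembly workers.
 * @code
 *   IEM_MC_BEGIN(3, 2);
 *   IEM_MC_ARG(uint16_t *,      pu16Dst,          0);
 *   IEM_MC_ARG(uint16_t,        u16Src,           1);
 *   IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,  2);
 *   IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *   IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *   IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *   IEM_MC_FETCH_GREG_U16(u16Src, IEM_GET_MODRM_REG(pVCpu, bRm));
 *   IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *   IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *   IEM_MC_COMMIT_EFLAGS(EFlags);
 *   IEM_MC_ADVANCE_RIP();
 *   IEM_MC_END();
 * @endcode
 */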
1084
1085/**
1086 * Defers the rest of the instruction emulation to a C implementation routine
1087 * and returns, only taking the standard parameters.
1088 *
1089 * @param a_pfnCImpl The pointer to the C routine.
1090 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
1091 */
1092#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
1093
1094/**
1095 * Defers the rest of the instruction emulation to a C implementation routine and
1096 * returns, taking one argument in addition to the standard ones.
1097 *
1098 * @param a_pfnCImpl The pointer to the C routine.
1099 * @param a0 The argument.
1100 */
1101#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
1102
1103/**
1104 * Defers the rest of the instruction emulation to a C implementation routine
1105 * and returns, taking two arguments in addition to the standard ones.
1106 *
1107 * @param a_pfnCImpl The pointer to the C routine.
1108 * @param a0 The first extra argument.
1109 * @param a1 The second extra argument.
1110 */
1111#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
1112
1113/**
1114 * Defers the rest of the instruction emulation to a C implementation routine
1115 * and returns, taking three arguments in addition to the standard ones.
1116 *
1117 * @param a_pfnCImpl The pointer to the C routine.
1118 * @param a0 The first extra argument.
1119 * @param a1 The second extra argument.
1120 * @param a2 The third extra argument.
1121 */
1122#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
1123
1124/**
1125 * Defers the rest of the instruction emulation to a C implementation routine
1126 * and returns, taking four arguments in addition to the standard ones.
1127 *
1128 * @param a_pfnCImpl The pointer to the C routine.
1129 * @param a0 The first extra argument.
1130 * @param a1 The second extra argument.
1131 * @param a2 The third extra argument.
1132 * @param a3 The fourth extra argument.
1133 */
1134#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
1135
1136/**
1137 * Defers the rest of the instruction emulation to a C implementation routine
1138 * and returns, taking five arguments in addition to the standard ones.
1139 *
1140 * @param a_pfnCImpl The pointer to the C routine.
1141 * @param a0 The first extra argument.
1142 * @param a1 The second extra argument.
1143 * @param a2 The third extra argument.
1144 * @param a3 The fourth extra argument.
1145 * @param a4 The fifth extra argument.
1146 */
1147#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
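
/* Illustrative usage sketch: loading a segment register from memory via a C
 * worker that finishes the instruction itself (the macro returns).
 * iemCImpl_LoadSReg and GCPtrEffSrc are assumed from elsewhere in IEM.
 * @code
 *   IEM_MC_BEGIN(2, 0);
 *   IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
 *   IEM_MC_ARG(uint16_t, u16Sel, 1);
 *   IEM_MC_FETCH_MEM_U16(u16Sel, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *   IEM_MC_CALL_CIMPL_2(iemCImpl_LoadSReg, iSRegArg, u16Sel);
 *   IEM_MC_END();
 * @endcode
 */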
1148
1149/**
1150 * Defers the entire instruction emulation to a C implementation routine and
1151 * returns, only taking the standard parameters.
1152 *
1153 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
1154 *
1155 * @param a_pfnCImpl The pointer to the C routine.
1156 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
1157 */
1158#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
1159
1160/**
1161 * Defers the entire instruction emulation to a C implementation routine and
1162 * returns, taking one argument in addition to the standard ones.
1163 *
1164 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
1165 *
1166 * @param a_pfnCImpl The pointer to the C routine.
1167 * @param a0 The argument.
1168 */
1169#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
1170
1171/**
1172 * Defers the entire instruction emulation to a C implementation routine and
1173 * returns, taking two arguments in addition to the standard ones.
1174 *
1175 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
1176 *
1177 * @param a_pfnCImpl The pointer to the C routine.
1178 * @param a0 The first extra argument.
1179 * @param a1 The second extra argument.
1180 */
1181#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
1182
1183/**
1184 * Defers the entire instruction emulation to a C implementation routine and
1185 * returns, taking three arguments in addition to the standard ones.
1186 *
1187 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
1188 *
1189 * @param a_pfnCImpl The pointer to the C routine.
1190 * @param a0 The first extra argument.
1191 * @param a1 The second extra argument.
1192 * @param a2 The third extra argument.
1193 */
1194#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
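
/* Illustrative usage sketch: an instruction implemented entirely in C, so
 * the decoder body is just the deferral (HLT flavour). FNIEMOP_DEF and the
 * IEMOP_HLP_* decoding helpers live in the instruction decoder sources and
 * are shown here only to place the macro in context.
 * @code
 *   FNIEMOP_DEF(iemOp_hlt)
 *   {
 *       IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *       return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *   }
 * @endcode
 */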
1195
1196/**
1197 * Calls an FPU assembly implementation taking one visible argument.
1198 *
1199 * @param a_pfnAImpl Pointer to the assembly FPU routine.
1200 * @param a0 The first extra argument.
1201 */
1202#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
1203 do { \
1204 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0)); \
1205 } while (0)
1206
1207/**
1208 * Calls an FPU assembly implementation taking two visible arguments.
1209 *
1210 * @param a_pfnAImpl Pointer to the assembly FPU routine.
1211 * @param a0 The first extra argument.
1212 * @param a1 The second extra argument.
1213 */
1214#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
1215 do { \
1216 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
1217 } while (0)
1218
1219/**
1220 * Calls an FPU assembly implementation taking three visible arguments.
1221 *
1222 * @param a_pfnAImpl Pointer to the assembly FPU routine.
1223 * @param a0 The first extra argument.
1224 * @param a1 The second extra argument.
1225 * @param a2 The third extra argument.
1226 */
1227#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
1228 do { \
1229 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
1230 } while (0)
1231
1232#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
1233 do { \
1234 (a_FpuData).FSW = (a_FSW); \
1235 (a_FpuData).r80Result = *(a_pr80Value); \
1236 } while (0)
1237
1238/** Pushes FPU result onto the stack. */
1239#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
1240 iemFpuPushResult(pVCpu, &a_FpuData)
1241/** Pushes FPU result onto the stack and sets the FPUDP. */
1242#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
1243 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
1244
1245/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
1246#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
1247 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
1248
1249/** Stores FPU result in a stack register. */
1250#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
1251 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
1252/** Stores FPU result in a stack register and pops the stack. */
1253#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
1254 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
1255/** Stores FPU result in a stack register and sets the FPUDP. */
1256#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
1257 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
1258/** Stores FPU result in a stack register, sets the FPUDP, and pops the
1259 * stack. */
1260#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
1261 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
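
/* Illustrative usage sketch: an FADD ST0,STn style body combining the call
 * and store helpers with the register-empty checks defined near the end of
 * this file; IEM_MC_ARG_LOCAL_REF and iemAImpl_fadd_r80_by_r80 are assumed
 * from elsewhere in IEM. The extra braces are decorative, the IEM_MC_IF/
 * IEM_MC_ELSE/IEM_MC_ENDIF macros supply their own.
 * @code
 *   IEM_MC_BEGIN(3, 1);
 *   IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
 *   IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *   IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
 *   IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
 *   IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *   IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *   IEM_MC_PREPARE_FPU_USAGE();
 *   IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg) {
 *       IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *       IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *   } IEM_MC_ELSE() {
 *       IEM_MC_FPU_STACK_UNDERFLOW(0);
 *   } IEM_MC_ENDIF();
 *   IEM_MC_ADVANCE_RIP();
 *   IEM_MC_END();
 * @endcode
 */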
1262
1263/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
1264#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
1265 iemFpuUpdateOpcodeAndIp(pVCpu)
1266/** Free a stack register (for FFREE and FFREEP). */
1267#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
1268 iemFpuStackFree(pVCpu, a_iStReg)
1269/** Increment the FPU stack pointer. */
1270#define IEM_MC_FPU_STACK_INC_TOP() \
1271 iemFpuStackIncTop(pVCpu)
1272/** Decrement the FPU stack pointer. */
1273#define IEM_MC_FPU_STACK_DEC_TOP() \
1274 iemFpuStackDecTop(pVCpu)
1275
1276/** Updates the FSW, FOP, FPUIP, and FPUCS. */
1277#define IEM_MC_UPDATE_FSW(a_u16FSW) \
1278 iemFpuUpdateFSW(pVCpu, a_u16FSW)
1279/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
1280#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
1281 iemFpuUpdateFSW(pVCpu, a_u16FSW)
1282/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
1283#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
1284 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
1285/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
1286#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
1287 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
1288/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
1289 * stack. */
1290#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
1291 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
1292/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
1293#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
1294 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
1295
1296/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
1297#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
1298 iemFpuStackUnderflow(pVCpu, a_iStDst)
1299/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
1300 * stack. */
1301#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
1302 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
1303/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
1304 * FPUDS. */
1305#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
1306 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
1307/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
1308 * FPUDS. Pops stack. */
1309#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
1310 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
1311/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
1312 * stack twice. */
1313#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
1314 iemFpuStackUnderflowThenPopPop(pVCpu)
1315/** Raises an FPU stack underflow exception for an instruction pushing a result
1316 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
1317#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
1318 iemFpuStackPushUnderflow(pVCpu)
1319/** Raises an FPU stack underflow exception for an instruction pushing a result
1320 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
1321#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
1322 iemFpuStackPushUnderflowTwo(pVCpu)
1323
1324/** Raises an FPU stack overflow exception as part of a push attempt. Sets
1325 * FPUIP, FPUCS and FOP. */
1326#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
1327 iemFpuStackPushOverflow(pVCpu)
1328/** Raises an FPU stack overflow exception as part of a push attempt. Sets
1329 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
1330#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
1331 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
1332/** Prepares for using the FPU state.
1333 * Ensures that we can use the host FPU in the current context (RC+R0).
1334 * Ensures the guest FPU state in the CPUMCTX is up to date. */
1335#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
1336/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
1337#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
1338/** Actualizes the guest FPU state so it can be accessed and modified. */
1339#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
1340
1341/** Stores an SSE SIMD result, updating MXCSR. */
1342#define IEM_MC_STORE_SSE_RESULT(a_SseData, a_iXmmReg) \
1343 iemSseStoreResult(pVCpu, &a_SseData, a_iXmmReg)
1344/** Updates MXCSR. */
1345#define IEM_MC_SSE_UPDATE_MXCSR(a_fMxcsr) \
1346 iemSseUpdateMxcsr(pVCpu, a_fMxcsr)
1347
1348/** Prepares for using the SSE state.
1349 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
1350 * Ensures the guest SSE state in the CPUMCTX is up to date. */
1351#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
1352/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
1353#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
1354/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
1355#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
1356
1357/** Prepares for using the AVX state.
1358 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
1359 * Ensures the guest AVX state in the CPUMCTX is up to date.
1360 * @note This will include the AVX-512 state too once support for it is added,
1361 * due to the zero-extending behaviour of VEX-encoded instructions.
1362#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
1363/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
1364#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
1365/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
1366#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
1367
1368/**
1369 * Calls an MMX assembly implementation taking two visible arguments.
1370 *
1371 * @param a_pfnAImpl Pointer to the assembly MMX routine.
1372 * @param a0 The first extra argument.
1373 * @param a1 The second extra argument.
1374 */
1375#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
1376 do { \
1377 IEM_MC_PREPARE_FPU_USAGE(); \
1378 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
1379 } while (0)
1380
1381/**
1382 * Calls an MMX assembly implementation taking three visible arguments.
1383 *
1384 * @param a_pfnAImpl Pointer to the assembly MMX routine.
1385 * @param a0 The first extra argument.
1386 * @param a1 The second extra argument.
1387 * @param a2 The third extra argument.
1388 */
1389#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
1390 do { \
1391 IEM_MC_PREPARE_FPU_USAGE(); \
1392 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
1393 } while (0)
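
/* Illustrative usage sketch: a register,register PXOR mm,mm style body. Note
 * that IEM_MC_CALL_MMX_AIMPL_2 does IEM_MC_PREPARE_FPU_USAGE itself, so the
 * caller only raises exceptions and wires up the register references;
 * IEM_MC_REF_MREG_U64 and the IEM_GET_MODRM_*_8 helpers are assumed from
 * elsewhere in this file and the opcode helpers.
 * @code
 *   IEM_MC_BEGIN(2, 0);
 *   IEM_MC_ARG(uint64_t *,       pDst, 0);
 *   IEM_MC_ARG(uint64_t const *, pSrc, 1);
 *   IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
 *   IEM_MC_REF_MREG_U64(pDst, IEM_GET_MODRM_REG_8(bRm));
 *   IEM_MC_REF_MREG_U64_CONST(pSrc, IEM_GET_MODRM_RM_8(bRm));
 *   IEM_MC_CALL_MMX_AIMPL_2(iemAImpl_pxor_u64, pDst, pSrc);
 *   IEM_MC_ADVANCE_RIP();
 *   IEM_MC_END();
 * @endcode
 */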
1394
1395
1396/**
1397 * Calls an SSE assembly implementation taking two visible arguments.
1398 *
1399 * @param a_pfnAImpl Pointer to the assembly SSE routine.
1400 * @param a0 The first extra argument.
1401 * @param a1 The second extra argument.
1402 */
1403#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
1404 do { \
1405 IEM_MC_PREPARE_SSE_USAGE(); \
1406 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
1407 } while (0)
1408
1409/**
1410 * Calls an SSE assembly implementation taking three visible arguments.
1411 *
1412 * @param a_pfnAImpl Pointer to the assembly SSE routine.
1413 * @param a0 The first extra argument.
1414 * @param a1 The second extra argument.
1415 * @param a2 The third extra argument.
1416 */
1417#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
1418 do { \
1419 IEM_MC_PREPARE_SSE_USAGE(); \
1420 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
1421 } while (0)
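
/* Illustrative usage sketch: a register,register PXOR xmm,xmm style body;
 * IEM_MC_CALL_SSE_AIMPL_2 likewise does IEM_MC_PREPARE_SSE_USAGE itself.
 * IEM_MC_REF_XREG_U128 and friends are assumed from elsewhere in this file.
 * @code
 *   IEM_MC_BEGIN(2, 0);
 *   IEM_MC_ARG(PRTUINT128U,  puDst, 0);
 *   IEM_MC_ARG(PCRTUINT128U, puSrc, 1);
 *   IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
 *   IEM_MC_REF_XREG_U128(puDst, IEM_GET_MODRM_REG(pVCpu, bRm));
 *   IEM_MC_REF_XREG_U128_CONST(puSrc, IEM_GET_MODRM_RM(pVCpu, bRm));
 *   IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pxor_u128, puDst, puSrc);
 *   IEM_MC_ADVANCE_RIP();
 *   IEM_MC_END();
 * @endcode
 */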
1422
1423
1424/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
1425 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
1426#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
1427 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, &pVCpu->cpum.GstCtx.XState, 0)
1428
1429/**
1430 * Calls an AVX assembly implementation taking two visible arguments.
1431 *
1432 * There is one implicit zeroth argument, a pointer to the extended state.
1433 *
1434 * @param a_pfnAImpl Pointer to the assembly AVX routine.
1435 * @param a1 The first extra argument.
1436 * @param a2 The second extra argument.
1437 */
1438#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
1439 do { \
1440 IEM_MC_PREPARE_AVX_USAGE(); \
1441 a_pfnAImpl(pXState, (a1), (a2)); \
1442 } while (0)
1443
1444/**
1445 * Calls an AVX assembly implementation taking three visible arguments.
1446 *
1447 * There is one implicit zeroth argument, a pointer to the extended state.
1448 *
1449 * @param a_pfnAImpl Pointer to the assembly AVX routine.
1450 * @param a1 The first extra argument.
1451 * @param a2 The second extra argument.
1452 * @param a3 The third extra argument.
1453 */
1454#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
1455 do { \
1456 IEM_MC_PREPARE_AVX_USAGE(); \
1457 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
1458 } while (0)
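
/* Illustrative usage sketch: a VEX.256 three-operand body (VPXOR ymm
 * flavour) using the implicit-argument helper above; the YREG fetch/store
 * macros, IEM_GET_EFFECTIVE_VVVV and the worker name are assumptions drawn
 * from elsewhere in IEM.
 * @code
 *   IEM_MC_BEGIN(4, 3);
 *   IEM_MC_LOCAL(RTUINT256U, uDst);
 *   IEM_MC_LOCAL(RTUINT256U, uSrc1);
 *   IEM_MC_LOCAL(RTUINT256U, uSrc2);
 *   IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *   IEM_MC_ARG_LOCAL_REF(PRTUINT256U,  puDst,  uDst,  1);
 *   IEM_MC_ARG_LOCAL_REF(PCRTUINT256U, puSrc1, uSrc1, 2);
 *   IEM_MC_ARG_LOCAL_REF(PCRTUINT256U, puSrc2, uSrc2, 3);
 *   IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT();
 *   IEM_MC_FETCH_YREG_U256(uSrc1, IEM_GET_EFFECTIVE_VVVV(pVCpu));
 *   IEM_MC_FETCH_YREG_U256(uSrc2, IEM_GET_MODRM_RM(pVCpu, bRm));
 *   IEM_MC_CALL_AVX_AIMPL_3(iemAImpl_vpxor_u256, puDst, puSrc1, puSrc2);
 *   IEM_MC_STORE_YREG_U256_ZX_VLMAX(IEM_GET_MODRM_REG(pVCpu, bRm), uDst);
 *   IEM_MC_ADVANCE_RIP();
 *   IEM_MC_END();
 * @endcode
 */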
1459
1460/** @note Not for IOPL or IF testing. */
1461#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
1462/** @note Not for IOPL or IF testing. */
1463#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
1464/** @note Not for IOPL or IF testing. */
1465#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
1466/** @note Not for IOPL or IF testing. */
1467#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
1468/** @note Not for IOPL or IF testing. */
1469#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
1470 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
1471 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
1472/** @note Not for IOPL or IF testing. */
1473#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
1474 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
1475 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
1476/** @note Not for IOPL or IF testing. */
1477#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
1478 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
1479 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
1480 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
1481/** @note Not for IOPL or IF testing. */
1482#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
1483 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
1484 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
1485 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
1486#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
1487#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
1488#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
1489/** @note Not for IOPL or IF testing. */
1490#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
1491 if ( pVCpu->cpum.GstCtx.cx != 0 \
1492 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
1493/** @note Not for IOPL or IF testing. */
1494#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
1495 if ( pVCpu->cpum.GstCtx.ecx != 0 \
1496 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
1497/** @note Not for IOPL or IF testing. */
1498#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
1499 if ( pVCpu->cpum.GstCtx.rcx != 0 \
1500 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
1501/** @note Not for IOPL or IF testing. */
1502#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
1503 if ( pVCpu->cpum.GstCtx.cx != 0 \
1504 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
1505/** @note Not for IOPL or IF testing. */
1506#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
1507 if ( pVCpu->cpum.GstCtx.ecx != 0 \
1508 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
1509/** @note Not for IOPL or IF testing. */
1510#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
1511 if ( pVCpu->cpum.GstCtx.rcx != 0 \
1512 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
1513#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
1514#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
1515
1516#define IEM_MC_REF_FPUREG(a_pr80Dst, a_iSt) \
1517 do { (a_pr80Dst) = &pVCpu->cpum.GstCtx.XState.x87.aRegs[X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW, a_iSt)].r80; } while (0)
1518#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
1519 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
1520#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
1521 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
1524#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
1525 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
1526#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
1527 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
1528#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
1529 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
1530#define IEM_MC_IF_FCW_IM() \
1531 if (pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_IM) {
1532#define IEM_MC_IF_MXCSR_XCPT_PENDING() \
1533 if (( ~((pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
1534 & (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_FLAGS)) != 0) {
1535
1536#define IEM_MC_ELSE() } else {
1537#define IEM_MC_ENDIF() } do {} while (0)
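
/* Illustrative usage sketch: the IEM_MC_IF_* blocks pair with IEM_MC_ELSE
 * and IEM_MC_ENDIF to form plain C if/else once expanded (JZ Jb flavour;
 * the braces below are decorative, the macros supply their own).
 * @code
 *   IEM_MC_BEGIN(0, 0);
 *   IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
 *       IEM_MC_REL_JMP_S8(i8Imm);
 *   } IEM_MC_ELSE() {
 *       IEM_MC_ADVANCE_RIP();
 *   } IEM_MC_ENDIF();
 *   IEM_MC_END();
 * @endcode
 */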
1538
1539/** @} */
1540
1541#endif /* !VMM_INCLUDED_SRC_include_IEMMc_h */
1542