VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMMc.h@ 97112

Last change on this file since 97112 was 97112, checked in by vboxsync, 2 years ago

IEM: CVTxx2SI zero extends result when writing 32-bit register, just like everyone else.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 83.3 KB
Line 
1/* $Id: IEMMc.h 97112 2022-10-12 13:22:45Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - IEM_MC_XXX.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMMc_h
29#define VMM_INCLUDED_SRC_include_IEMMc_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34
35/** @name "Microcode" macros.
36 *
37 * The idea is that we should be able to use the same code to interpret
38 * instructions as well as recompiler instructions. Thus this obfuscation.
39 *
40 * @{
41 */
42#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
43#define IEM_MC_END() }
44#define IEM_MC_PAUSE() do {} while (0)
45#define IEM_MC_CONTINUE() do {} while (0)
46
47/** Internal macro. */
48#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
49 do \
50 { \
51 VBOXSTRICTRC rcStrict2 = a_Expr; \
52 if (rcStrict2 != VINF_SUCCESS) \
53 return rcStrict2; \
54 } while (0)
55
56
57#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
58#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
59#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
60#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
61#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
62#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
63#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
64#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
65#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
66 do { \
67 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
68 return iemRaiseDeviceNotAvailable(pVCpu); \
69 } while (0)
70#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
71 do { \
72 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
73 return iemRaiseDeviceNotAvailable(pVCpu); \
74 } while (0)
75#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
76 do { \
77 if (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES) \
78 return iemRaiseMathFault(pVCpu); \
79 } while (0)
80#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
81 do { \
82 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
83 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
84 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
85 return iemRaiseUndefinedOpcode(pVCpu); \
86 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
87 return iemRaiseDeviceNotAvailable(pVCpu); \
88 } while (0)
89#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
90 do { \
91 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
92 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
93 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
94 return iemRaiseUndefinedOpcode(pVCpu); \
95 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
96 return iemRaiseDeviceNotAvailable(pVCpu); \
97 } while (0)
98#define IEM_MC_MAYBE_RAISE_SSE42_RELATED_XCPT() \
99 do { \
100 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
101 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
102 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse42) \
103 return iemRaiseUndefinedOpcode(pVCpu); \
104 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
105 return iemRaiseDeviceNotAvailable(pVCpu); \
106 } while (0)
107#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
108 do { \
109 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
110 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
111 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
112 return iemRaiseUndefinedOpcode(pVCpu); \
113 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
114 return iemRaiseDeviceNotAvailable(pVCpu); \
115 } while (0)
116#define IEM_MC_MAYBE_RAISE_SSSE3_RELATED_XCPT() \
117 do { \
118 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
119 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
120 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSsse3) \
121 return iemRaiseUndefinedOpcode(pVCpu); \
122 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
123 return iemRaiseDeviceNotAvailable(pVCpu); \
124 } while (0)
125#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
126 do { \
127 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
128 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
129 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
130 return iemRaiseUndefinedOpcode(pVCpu); \
131 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
132 return iemRaiseDeviceNotAvailable(pVCpu); \
133 } while (0)
134#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
135 do { \
136 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
137 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
138 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
139 return iemRaiseUndefinedOpcode(pVCpu); \
140 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
141 return iemRaiseDeviceNotAvailable(pVCpu); \
142 } while (0)
143#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
144 do { \
145 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
146 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
147 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
148 return iemRaiseUndefinedOpcode(pVCpu); \
149 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
150 return iemRaiseDeviceNotAvailable(pVCpu); \
151 } while (0)
152#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
153 do { \
154 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
155 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
156 return iemRaiseUndefinedOpcode(pVCpu); \
157 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
158 return iemRaiseDeviceNotAvailable(pVCpu); \
159 if (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES) \
160 return iemRaiseMathFault(pVCpu); \
161 } while (0)
162#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_EX(a_fSupported) \
163 do { \
164 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
165 || !(a_fSupported)) \
166 return iemRaiseUndefinedOpcode(pVCpu); \
167 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
168 return iemRaiseDeviceNotAvailable(pVCpu); \
169 if (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES) \
170 return iemRaiseMathFault(pVCpu); \
171 } while (0)
172#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
173 do { \
174 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
175 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
176 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
177 return iemRaiseUndefinedOpcode(pVCpu); \
178 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
179 return iemRaiseDeviceNotAvailable(pVCpu); \
180 if (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES) \
181 return iemRaiseMathFault(pVCpu); \
182 } while (0)
183#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
184 do { \
185 if (pVCpu->iem.s.uCpl != 0) \
186 return iemRaiseGeneralProtectionFault0(pVCpu); \
187 } while (0)
188#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
189 do { \
190 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
191 else return iemRaiseGeneralProtectionFault0(pVCpu); \
192 } while (0)
193#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
194 do { \
195 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
196 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
197 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
198 return iemRaiseUndefinedOpcode(pVCpu); \
199 } while (0)
200#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
201 do { \
202 if (!IEM_IS_CANONICAL(a_u64Addr)) \
203 return iemRaiseGeneralProtectionFault0(pVCpu); \
204 } while (0)
205#define IEM_MC_MAYBE_RAISE_SSE_AVX_SIMD_FP_OR_UD_XCPT() \
206 do { \
207 if (( ~((pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
208 & (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_FLAGS)) != 0) \
209 { \
210 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT)\
211 return iemRaiseSimdFpException(pVCpu); \
212 else \
213 return iemRaiseUndefinedOpcode(pVCpu); \
214 } \
215 } while (0)
216#define IEM_MC_RAISE_SSE_AVX_SIMD_FP_OR_UD_XCPT() \
217 do { \
218 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT)\
219 return iemRaiseSimdFpException(pVCpu); \
220 else \
221 return iemRaiseUndefinedOpcode(pVCpu); \
222 } while (0)
223#define IEM_MC_MAYBE_RAISE_PCLMUL_RELATED_XCPT() \
224 do { \
225 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
226 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
227 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fPclMul) \
228 return iemRaiseUndefinedOpcode(pVCpu); \
229 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
230 return iemRaiseDeviceNotAvailable(pVCpu); \
231 } while (0)
232
233
234#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
235#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
236#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
237#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
238#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
239#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
240#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
241 uint32_t a_Name; \
242 uint32_t *a_pName = &a_Name
243#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
244 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
245
246#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
247#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
248
249#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
250#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
251#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
252#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
253#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
254#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
255#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
256#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
257#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
258#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
259#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
260#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
261#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
262#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
263#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
264#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
265#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
266#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
267 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
268 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
269 } while (0)
270#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
271 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
272 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
273 } while (0)
274#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
275 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
276 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
277 } while (0)
278/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
279#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
280 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
281 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
282 } while (0)
283#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
284 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
285 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
286 } while (0)
287/** @note Not for IOPL or IF testing or modification. */
288#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
289#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
290#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.XState.x87.FSW
291#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.XState.x87.FCW
292
293#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
294#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
295#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
296#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
297#define IEM_MC_STORE_GREG_I64(a_iGReg, a_i64Value) *iemGRegRefI64(pVCpu, (a_iGReg)) = (a_i64Value)
298#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
299#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
300#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
301#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
302#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
303#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
304/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
305#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
306 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
307 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
308 } while (0)
309#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
310 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
311 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
312 } while (0)
313#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
314 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
315
316
317#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
318#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
319/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
320 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
321#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
322#define IEM_MC_REF_GREG_I32(a_pi32Dst, a_iGReg) (a_pi32Dst) = (int32_t *)iemGRegRefU32(pVCpu, (a_iGReg))
323#define IEM_MC_REF_GREG_I32_CONST(a_pi32Dst, a_iGReg) (a_pi32Dst) = (int32_t const *)iemGRegRefU32(pVCpu, (a_iGReg))
324#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
325#define IEM_MC_REF_GREG_I64(a_pi64Dst, a_iGReg) (a_pi64Dst) = (int64_t *)iemGRegRefU64(pVCpu, (a_iGReg))
326#define IEM_MC_REF_GREG_I64_CONST(a_pi64Dst, a_iGReg) (a_pi64Dst) = (int64_t const *)iemGRegRefU64(pVCpu, (a_iGReg))
327/** @note Not for IOPL or IF testing or modification. */
328#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
329#define IEM_MC_REF_MXCSR(a_pfMxcsr) (a_pfMxcsr) = &pVCpu->cpum.GstCtx.XState.x87.MXCSR
330
331#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
332#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
333#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
334 do { \
335 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
336 *pu32Reg += (a_u32Value); \
337 pu32Reg[1] = 0; /* implicitly clear the high bit. */ \
338 } while (0)
339#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
340
341#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
342#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
343#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
344 do { \
345 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
346 *pu32Reg -= (a_u32Value); \
347 pu32Reg[1] = 0; /* implicitly clear the high bit. */ \
348 } while (0)
349#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
350#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
351
352#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
353#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
354#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
355#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
356#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
357#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
358#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
359
360#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
361#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
362#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
363#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
364
365#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
366#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
367#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
368
369#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
370#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
371#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
372
373#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
374#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
375#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
376
377#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
378#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
379#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
380
381#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
382
383#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
384
385#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
386#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
387#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
388 do { \
389 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
390 *pu32Reg &= (a_u32Value); \
391 pu32Reg[1] = 0; /* implicitly clear the high bit. */ \
392 } while (0)
393#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
394
395#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
396#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
397#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
398 do { \
399 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
400 *pu32Reg |= (a_u32Value); \
401 pu32Reg[1] = 0; /* implicitly clear the high bit. */ \
402 } while (0)
403#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
404
405
406/** @note Not for IOPL or IF modification. */
407#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
408/** @note Not for IOPL or IF modification. */
409#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
410/** @note Not for IOPL or IF modification. */
411#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
412
413#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.XState.x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
414
415/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
416#define IEM_MC_FPU_TO_MMX_MODE() do { \
417 iemFpuRotateStackSetTop(&pVCpu->cpum.GstCtx.XState.x87, 0); \
418 pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
419 pVCpu->cpum.GstCtx.XState.x87.FTW = 0xff; \
420 } while (0)
421
422/** Switches the FPU state from MMX mode (FSW.TOS=0, FTW=0xffff). */
423#define IEM_MC_FPU_FROM_MMX_MODE() do { \
424 iemFpuRotateStackSetTop(&pVCpu->cpum.GstCtx.XState.x87, 0); \
425 pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
426 pVCpu->cpum.GstCtx.XState.x87.FTW = 0; \
427 } while (0)
428
429#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
430 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx; } while (0)
431#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
432 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[0]; } while (0)
433#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
434 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
435 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
436 } while (0)
437#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
438 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
439 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
440 } while (0)
441#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
442 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
443#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
444 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
445#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
446 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
447#define IEM_MC_MODIFIED_MREG(a_iMReg) \
448 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; } while (0)
449#define IEM_MC_MODIFIED_MREG_BY_REF(a_pu64Dst) \
450 do { ((uint32_t *)(a_pu64Dst))[2] = 0xffff; } while (0)
451
452#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
453 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
454 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
455 } while (0)
456#define IEM_MC_FETCH_XREG_XMM(a_XmmValue, a_iXReg) \
457 do { (a_XmmValue).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
458 (a_XmmValue).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
459 } while (0)
460#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
461 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; } while (0)
462#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
463 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0]; } while (0)
464#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
465 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; } while (0)
466#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
467 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
468 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
469 } while (0)
470#define IEM_MC_STORE_XREG_XMM(a_iXReg, a_XmmValue) \
471 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_XmmValue).au64[0]; \
472 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_XmmValue).au64[1]; \
473 } while (0)
474#define IEM_MC_STORE_XREG_XMM_U32(a_iXReg, a_iDword, a_XmmValue) \
475 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDword)] = (a_XmmValue).au32[(a_iDword)]; } while (0)
476#define IEM_MC_STORE_XREG_XMM_U64(a_iXReg, a_iQword, a_XmmValue) \
477 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQword)] = (a_XmmValue).au64[(a_iQword)]; } while (0)
478#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
479 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
480#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
481 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
482 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
483 } while (0)
484#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
485 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
486#define IEM_MC_STORE_XREG_R32(a_iXReg, a_r32Value) \
487 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[0] = (a_r32Value); } while (0)
488#define IEM_MC_STORE_XREG_R64(a_iXReg, a_r64Value) \
489 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[0] = (a_r64Value); } while (0)
490#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
491 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
492 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
493 } while (0)
494#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
495 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
496#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
497 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
498#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
499 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
500#define IEM_MC_REF_XREG_XMM_CONST(a_pXmmDst, a_iXReg) \
501 (a_pXmmDst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)])
502#define IEM_MC_REF_XREG_U32_CONST(a_pu32Dst, a_iXReg) \
503 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0])
504#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
505 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0])
506#define IEM_MC_REF_XREG_R32_CONST(a_pr32Dst, a_iXReg) \
507 (a_pr32Dst) = ((RTFLOAT32U const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[0])
508#define IEM_MC_REF_XREG_R64_CONST(a_pr64Dst, a_iXReg) \
509 (a_pr64Dst) = ((RTFLOAT64U const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[0])
510#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
511 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[0] \
512 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[0]; \
513 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[1] \
514 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[1]; \
515 } while (0)
516
517#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
518 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
519 (a_u32Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au32[0]; \
520 } while (0)
521#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
522 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
523 (a_u64Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
524 } while (0)
525#define IEM_MC_FETCH_YREG_2ND_U64(a_u64Dst, a_iYRegSrc) \
526 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
527 (a_u64Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
528 } while (0)
529#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
530 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
531 (a_u128Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
532 (a_u128Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
533 } while (0)
534#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
535 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
536 (a_u256Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
537 (a_u256Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
538 (a_u256Dst).au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
539 (a_u256Dst).au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
540 } while (0)
541
542#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
543#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
544 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
545 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
546 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = 0; \
547 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
548 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
549 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
550 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
551 } while (0)
552#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
553 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
554 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
555 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
556 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
557 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
558 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
559 } while (0)
560#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
561 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
562 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
563 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
564 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
565 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
566 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
567 } while (0)
568#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
569 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
570 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
571 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
572 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
573 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
574 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
575 } while (0)
576
577#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
578 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
579#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
580 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
581#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
582 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].au64[0])
583#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
584 do { uintptr_t const iYRegTmp = (a_iYReg); \
585 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
586 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
587 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegTmp); \
588 } while (0)
589
590#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
591 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
592 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
593 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
594 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
595 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
596 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
597 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
598 } while (0)
599#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
600 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
601 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
602 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
603 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
604 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
605 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
606 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
607 } while (0)
608#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
609 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
610 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
611 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
612 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
613 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
614 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
615 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
616 } while (0)
617
618#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
619 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
620 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
621 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
622 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc32Tmp].au32[0]; \
623 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au32[1]; \
624 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
625 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
626 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
627 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
628 } while (0)
629#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
630 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
631 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
632 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
633 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
634 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
635 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
636 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
637 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
638 } while (0)
639#define IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
640 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
641 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
642 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
643 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
644 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[0]; \
645 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
646 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
647 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
648 } while (0)
649#define IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
650 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
651 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
652 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
653 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[1]; \
654 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
655 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
656 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
657 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
658 } while (0)
659#define IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX(a_iYRegDst, a_iYRegSrcHx, a_u64Local) \
660 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
661 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
662 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[0]; \
663 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u64Local); \
664 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
665 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
666 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
667 } while (0)
668#define IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
669 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
670 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
671 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
672 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
673 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
674 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
675 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
676 } while (0)
677
678#ifndef IEM_WITH_SETJMP
679# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
680 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
681# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
682 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
683# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
684 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
685#else
686# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
687 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
688# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
689 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
690# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
691 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
692#endif
693
694#ifndef IEM_WITH_SETJMP
695# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
696 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
697# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
698 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
699# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
700 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
701#else
702# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
703 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
704# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
705 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
706# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
707 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
708#endif
709
710#ifndef IEM_WITH_SETJMP
711# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
712 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
713# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
714 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
715# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
716 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
717#else
718# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
719 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
720# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
721 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
722# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
723 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
724#endif
725
726#ifdef SOME_UNUSED_FUNCTION
727# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
728 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
729#endif
730
731#ifndef IEM_WITH_SETJMP
732# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
733 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
734# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
735 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
736# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
737 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
738# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
739 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
740#else
741# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
742 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
743# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
744 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
745# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
746 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
747# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
748 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
749#endif
750
751#ifndef IEM_WITH_SETJMP
752# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
753 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u, (a_iSeg), (a_GCPtrMem)))
754# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
755 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).u, (a_iSeg), (a_GCPtrMem)))
756# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
757 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
758# define IEM_MC_FETCH_MEM_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \
759 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataD80(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem)))
760#else
761# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
762 ((a_r32Dst).u = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
763# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
764 ((a_r64Dst).u = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
765# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
766 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
767# define IEM_MC_FETCH_MEM_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \
768 iemMemFetchDataD80Jmp(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem))
769#endif
770
771#ifndef IEM_WITH_SETJMP
772# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
773 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
774# define IEM_MC_FETCH_MEM_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \
775 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
776# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
777 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
778
779# define IEM_MC_FETCH_MEM_XMM(a_XmmDst, a_iSeg, a_GCPtrMem) \
780 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)))
781# define IEM_MC_FETCH_MEM_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \
782 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)))
783# define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \
784 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)))
785# define IEM_MC_FETCH_MEM_XMM_U32(a_XmmDst, a_iDWord, a_iSeg, a_GCPtrMem) \
786 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_XmmDst).au32[(a_iDWord)], (a_iSeg), (a_GCPtrMem)))
787# define IEM_MC_FETCH_MEM_XMM_U64(a_XmmDst, a_iQWord, a_iSeg, a_GCPtrMem) \
788 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_XmmDst).au64[(a_iQWord)], (a_iSeg), (a_GCPtrMem)))
789#else
790# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
791 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
792# define IEM_MC_FETCH_MEM_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \
793 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
794# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
795 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
796
797# define IEM_MC_FETCH_MEM_XMM(a_XmmDst, a_iSeg, a_GCPtrMem) \
798 iemMemFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
799# define IEM_MC_FETCH_MEM_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \
800 iemMemFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
801# define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \
802 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
803# define IEM_MC_FETCH_MEM_XMM_U32(a_XmmDst, a_iDWord, a_iSeg, a_GCPtrMem) \
804 (a_XmmDst).au32[(a_iDWord)] = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))
805# define IEM_MC_FETCH_MEM_XMM_U64(a_XmmDst, a_iQWord, a_iSeg, a_GCPtrMem) \
806 (a_XmmDst).au64[(a_iQWord)] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))
807#endif
808
809#ifndef IEM_WITH_SETJMP
810# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
811 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
812# define IEM_MC_FETCH_MEM_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \
813 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
814# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
815 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
816
817# define IEM_MC_FETCH_MEM_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \
818 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
819# define IEM_MC_FETCH_MEM_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \
820 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
821# define IEM_MC_FETCH_MEM_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \
822 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
823#else
824# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
825 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
826# define IEM_MC_FETCH_MEM_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \
827 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
828# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
829 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
830
831# define IEM_MC_FETCH_MEM_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \
832 iemMemFetchDataU256Jmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
833# define IEM_MC_FETCH_MEM_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \
834 iemMemFetchDataU256Jmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
835# define IEM_MC_FETCH_MEM_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \
836 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
837#endif
838
839
840
841#ifndef IEM_WITH_SETJMP
842# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
843 do { \
844 uint8_t u8Tmp; \
845 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
846 (a_u16Dst) = u8Tmp; \
847 } while (0)
848# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
849 do { \
850 uint8_t u8Tmp; \
851 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
852 (a_u32Dst) = u8Tmp; \
853 } while (0)
854# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
855 do { \
856 uint8_t u8Tmp; \
857 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
858 (a_u64Dst) = u8Tmp; \
859 } while (0)
860# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
861 do { \
862 uint16_t u16Tmp; \
863 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
864 (a_u32Dst) = u16Tmp; \
865 } while (0)
866# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
867 do { \
868 uint16_t u16Tmp; \
869 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
870 (a_u64Dst) = u16Tmp; \
871 } while (0)
872# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
873 do { \
874 uint32_t u32Tmp; \
875 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
876 (a_u64Dst) = u32Tmp; \
877 } while (0)
878#else /* IEM_WITH_SETJMP */
879# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
880 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
881# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
882 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
883# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
884 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
885# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
886 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
887# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
888 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
889# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
890 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
891#endif /* IEM_WITH_SETJMP */
892
893#ifndef IEM_WITH_SETJMP
894# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
895 do { \
896 uint8_t u8Tmp; \
897 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
898 (a_u16Dst) = (int8_t)u8Tmp; \
899 } while (0)
900# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
901 do { \
902 uint8_t u8Tmp; \
903 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
904 (a_u32Dst) = (int8_t)u8Tmp; \
905 } while (0)
906# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
907 do { \
908 uint8_t u8Tmp; \
909 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
910 (a_u64Dst) = (int8_t)u8Tmp; \
911 } while (0)
912# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
913 do { \
914 uint16_t u16Tmp; \
915 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
916 (a_u32Dst) = (int16_t)u16Tmp; \
917 } while (0)
918# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
919 do { \
920 uint16_t u16Tmp; \
921 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
922 (a_u64Dst) = (int16_t)u16Tmp; \
923 } while (0)
924# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
925 do { \
926 uint32_t u32Tmp; \
927 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
928 (a_u64Dst) = (int32_t)u32Tmp; \
929 } while (0)
930#else /* IEM_WITH_SETJMP */
931# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
932 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
933# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
934 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
935# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
936 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
937# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
938 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
939# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
940 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
941# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
942 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
943#endif /* IEM_WITH_SETJMP */
944
945#ifndef IEM_WITH_SETJMP
946# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
947 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
948# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
949 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
950# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
951 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
952# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
953 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
954#else
955# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
956 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
957# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
958 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
959# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
960 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
961# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
962 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
963#endif
964
965#ifndef IEM_WITH_SETJMP
966# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
967 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
968# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
969 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
970# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
971 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
972# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
973 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
974#else
975# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
976 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
977# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
978 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
979# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
980 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
981# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
982 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
983#endif
984
985#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
986#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
987#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
988#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
989#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u = UINT32_C(0xffc00000)
990#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->u = UINT64_C(0xfff8000000000000)
991#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
992 do { \
993 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
994 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
995 } while (0)
996#define IEM_MC_STORE_MEM_INDEF_D80_BY_REF(a_pd80Dst) \
997 do { \
998 (a_pd80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
999 (a_pd80Dst)->au16[4] = UINT16_C(0xffff); \
1000 } while (0)
1001
1002#ifndef IEM_WITH_SETJMP
1003# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
1004 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
1005# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
1006 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
1007#else
1008# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
1009 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
1010# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
1011 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
1012#endif
1013
1014#ifndef IEM_WITH_SETJMP
1015# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
1016 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
1017# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
1018 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
1019#else
1020# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
1021 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
1022# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
1023 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
1024#endif
1025
1026
1027#define IEM_MC_PUSH_U16(a_u16Value) \
1028 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
1029#define IEM_MC_PUSH_U32(a_u32Value) \
1030 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
1031#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
1032 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
1033#define IEM_MC_PUSH_U64(a_u64Value) \
1034 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
1035
1036#define IEM_MC_POP_U16(a_pu16Value) \
1037 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
1038#define IEM_MC_POP_U32(a_pu32Value) \
1039 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
1040#define IEM_MC_POP_U64(a_pu64Value) \
1041 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
1042
1043/** Maps guest memory for direct or bounce buffered access.
1044 * The purpose is to pass it to an operand implementation, thus the a_iArg.
1045 * @remarks May return.
1046 */
1047#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
1048 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), \
1049 (a_GCPtrMem), (a_fAccess), sizeof(*(a_pMem)) - 1))
1050
1051/** Maps guest memory for direct or bounce buffered access.
1052 * The purpose is to pass it to an operand implementation, thus the a_iArg.
1053 * @remarks May return.
1054 */
1055#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_cbAlign, a_iArg) \
1056 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), \
1057 (a_GCPtrMem), (a_fAccess), (a_cbAlign)))
1058
1059/** Commits the memory and unmaps the guest memory.
1060 * @remarks May return.
1061 */
1062#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
1063 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
1064
1065/** Commits the memory and unmaps the guest memory unless the FPU status word
1066 * indicates (@a a_u16FSW) and FPU control word indicates a pending exception
1067 * that would cause FLD not to store.
1068 *
1069 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
1070 * store, while \#P will not.
1071 *
1072 * @remarks May in theory return - for now.
1073 */
1074#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
1075 do { \
1076 if ( !(a_u16FSW & X86_FSW_ES) \
1077 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
1078 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
1079 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
1080 } while (0)
1081
1082/** Calculate efficient address from R/M. */
1083#ifndef IEM_WITH_SETJMP
1084# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
1085 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
1086#else
1087# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
1088 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
1089#endif
1090
1091#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
1092#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
1093#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
1094#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
1095#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
1096#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
1097#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
1098
1099/**
1100 * Defers the rest of the instruction emulation to a C implementation routine
1101 * and returns, only taking the standard parameters.
1102 *
1103 * @param a_pfnCImpl The pointer to the C routine.
1104 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
1105 */
1106#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
1107
1108/**
1109 * Defers the rest of instruction emulation to a C implementation routine and
1110 * returns, taking one argument in addition to the standard ones.
1111 *
1112 * @param a_pfnCImpl The pointer to the C routine.
1113 * @param a0 The argument.
1114 */
1115#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
1116
1117/**
1118 * Defers the rest of the instruction emulation to a C implementation routine
1119 * and returns, taking two arguments in addition to the standard ones.
1120 *
1121 * @param a_pfnCImpl The pointer to the C routine.
1122 * @param a0 The first extra argument.
1123 * @param a1 The second extra argument.
1124 */
1125#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
1126
1127/**
1128 * Defers the rest of the instruction emulation to a C implementation routine
1129 * and returns, taking three arguments in addition to the standard ones.
1130 *
1131 * @param a_pfnCImpl The pointer to the C routine.
1132 * @param a0 The first extra argument.
1133 * @param a1 The second extra argument.
1134 * @param a2 The third extra argument.
1135 */
1136#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
1137
1138/**
1139 * Defers the rest of the instruction emulation to a C implementation routine
1140 * and returns, taking four arguments in addition to the standard ones.
1141 *
1142 * @param a_pfnCImpl The pointer to the C routine.
1143 * @param a0 The first extra argument.
1144 * @param a1 The second extra argument.
1145 * @param a2 The third extra argument.
1146 * @param a3 The fourth extra argument.
1147 */
1148#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
1149
1150/**
1151 * Defers the rest of the instruction emulation to a C implementation routine
1152 * and returns, taking two arguments in addition to the standard ones.
1153 *
1154 * @param a_pfnCImpl The pointer to the C routine.
1155 * @param a0 The first extra argument.
1156 * @param a1 The second extra argument.
1157 * @param a2 The third extra argument.
1158 * @param a3 The fourth extra argument.
1159 * @param a4 The fifth extra argument.
1160 */
1161#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
1162
1163/**
1164 * Defers the entire instruction emulation to a C implementation routine and
1165 * returns, only taking the standard parameters.
1166 *
1167 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
1168 *
1169 * @param a_pfnCImpl The pointer to the C routine.
1170 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
1171 */
1172#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
1173
1174/**
1175 * Defers the entire instruction emulation to a C implementation routine and
1176 * returns, taking one argument in addition to the standard ones.
1177 *
1178 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
1179 *
1180 * @param a_pfnCImpl The pointer to the C routine.
1181 * @param a0 The argument.
1182 */
1183#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
1184
1185/**
1186 * Defers the entire instruction emulation to a C implementation routine and
1187 * returns, taking two arguments in addition to the standard ones.
1188 *
1189 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
1190 *
1191 * @param a_pfnCImpl The pointer to the C routine.
1192 * @param a0 The first extra argument.
1193 * @param a1 The second extra argument.
1194 */
1195#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
1196
1197/**
1198 * Defers the entire instruction emulation to a C implementation routine and
1199 * returns, taking three arguments in addition to the standard ones.
1200 *
1201 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
1202 *
1203 * @param a_pfnCImpl The pointer to the C routine.
1204 * @param a0 The first extra argument.
1205 * @param a1 The second extra argument.
1206 * @param a2 The third extra argument.
1207 */
1208#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
1209
1210/**
1211 * Calls a FPU assembly implementation taking one visible argument.
1212 *
1213 * @param a_pfnAImpl Pointer to the assembly FPU routine.
1214 * @param a0 The first extra argument.
1215 */
1216#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
1217 do { \
1218 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0)); \
1219 } while (0)
1220
1221/**
1222 * Calls a FPU assembly implementation taking two visible arguments.
1223 *
1224 * @param a_pfnAImpl Pointer to the assembly FPU routine.
1225 * @param a0 The first extra argument.
1226 * @param a1 The second extra argument.
1227 */
1228#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
1229 do { \
1230 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
1231 } while (0)
1232
1233/**
1234 * Calls a FPU assembly implementation taking three visible arguments.
1235 *
1236 * @param a_pfnAImpl Pointer to the assembly FPU routine.
1237 * @param a0 The first extra argument.
1238 * @param a1 The second extra argument.
1239 * @param a2 The third extra argument.
1240 */
1241#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
1242 do { \
1243 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
1244 } while (0)
1245
1246#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
1247 do { \
1248 (a_FpuData).FSW = (a_FSW); \
1249 (a_FpuData).r80Result = *(a_pr80Value); \
1250 } while (0)
1251
1252/** Pushes FPU result onto the stack. */
1253#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
1254 iemFpuPushResult(pVCpu, &a_FpuData)
1255/** Pushes FPU result onto the stack and sets the FPUDP. */
1256#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
1257 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
1258
1259/** Replaces ST0 with value one and pushes value 2 onto the FPU stack. */
1260#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
1261 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
1262
1263/** Stores FPU result in a stack register. */
1264#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
1265 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
1266/** Stores FPU result in a stack register and pops the stack. */
1267#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
1268 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
1269/** Stores FPU result in a stack register and sets the FPUDP. */
1270#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
1271 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
1272/** Stores FPU result in a stack register, sets the FPUDP, and pops the
1273 * stack. */
1274#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
1275 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
1276
1277/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
1278#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
1279 iemFpuUpdateOpcodeAndIp(pVCpu)
1280/** Free a stack register (for FFREE and FFREEP). */
1281#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
1282 iemFpuStackFree(pVCpu, a_iStReg)
1283/** Increment the FPU stack pointer. */
1284#define IEM_MC_FPU_STACK_INC_TOP() \
1285 iemFpuStackIncTop(pVCpu)
1286/** Decrement the FPU stack pointer. */
1287#define IEM_MC_FPU_STACK_DEC_TOP() \
1288 iemFpuStackDecTop(pVCpu)
1289
1290/** Updates the FSW, FOP, FPUIP, and FPUCS. */
1291#define IEM_MC_UPDATE_FSW(a_u16FSW) \
1292 iemFpuUpdateFSW(pVCpu, a_u16FSW)
1293/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
1294#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
1295 iemFpuUpdateFSW(pVCpu, a_u16FSW)
1296/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
1297#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
1298 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
1299/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
1300#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
1301 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
1302/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
1303 * stack. */
1304#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
1305 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
1306/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
1307#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
1308 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
1309
1310/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
1311#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
1312 iemFpuStackUnderflow(pVCpu, a_iStDst)
1313/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
1314 * stack. */
1315#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
1316 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
1317/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
1318 * FPUDS. */
1319#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
1320 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
1321/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
1322 * FPUDS. Pops stack. */
1323#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
1324 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
1325/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
1326 * stack twice. */
1327#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
1328 iemFpuStackUnderflowThenPopPop(pVCpu)
1329/** Raises a FPU stack underflow exception for an instruction pushing a result
1330 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
1331#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
1332 iemFpuStackPushUnderflow(pVCpu)
1333/** Raises a FPU stack underflow exception for an instruction pushing a result
1334 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
1335#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
1336 iemFpuStackPushUnderflowTwo(pVCpu)
1337
1338/** Raises a FPU stack overflow exception as part of a push attempt. Sets
1339 * FPUIP, FPUCS and FOP. */
1340#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
1341 iemFpuStackPushOverflow(pVCpu)
1342/** Raises a FPU stack overflow exception as part of a push attempt. Sets
1343 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
1344#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
1345 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
1346/** Prepares for using the FPU state.
1347 * Ensures that we can use the host FPU in the current context (RC+R0.
1348 * Ensures the guest FPU state in the CPUMCTX is up to date. */
1349#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
1350/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
1351#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
1352/** Actualizes the guest FPU state so it can be accessed and modified. */
1353#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
1354
1355/** Stores SSE SIMD result updating MXCSR. */
1356#define IEM_MC_STORE_SSE_RESULT(a_SseData, a_iXmmReg) \
1357 iemSseStoreResult(pVCpu, &a_SseData, a_iXmmReg)
1358/** Updates MXCSR. */
1359#define IEM_MC_SSE_UPDATE_MXCSR(a_fMxcsr) \
1360 iemSseUpdateMxcsr(pVCpu, a_fMxcsr)
1361
1362/** Prepares for using the SSE state.
1363 * Ensures that we can use the host SSE/FPU in the current context (RC+R0.
1364 * Ensures the guest SSE state in the CPUMCTX is up to date. */
1365#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
1366/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
1367#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
1368/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
1369#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
1370
1371/** Prepares for using the AVX state.
1372 * Ensures that we can use the host AVX/FPU in the current context (RC+R0.
1373 * Ensures the guest AVX state in the CPUMCTX is up to date.
1374 * @note This will include the AVX512 state too when support for it is added
1375 * due to the zero extending feature of VEX instruction. */
1376#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
1377/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
1378#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
1379/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
1380#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
1381
1382/**
1383 * Calls a MMX assembly implementation taking two visible arguments.
1384 *
1385 * @param a_pfnAImpl Pointer to the assembly MMX routine.
1386 * @param a0 The first extra argument.
1387 * @param a1 The second extra argument.
1388 */
1389#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
1390 do { \
1391 IEM_MC_PREPARE_FPU_USAGE(); \
1392 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
1393 } while (0)
1394
1395/**
1396 * Calls a MMX assembly implementation taking three visible arguments.
1397 *
1398 * @param a_pfnAImpl Pointer to the assembly MMX routine.
1399 * @param a0 The first extra argument.
1400 * @param a1 The second extra argument.
1401 * @param a2 The third extra argument.
1402 */
1403#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
1404 do { \
1405 IEM_MC_PREPARE_FPU_USAGE(); \
1406 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
1407 } while (0)
1408
1409
1410/**
1411 * Calls a SSE assembly implementation taking two visible arguments.
1412 *
1413 * @param a_pfnAImpl Pointer to the assembly SSE routine.
1414 * @param a0 The first extra argument.
1415 * @param a1 The second extra argument.
1416 */
1417#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
1418 do { \
1419 IEM_MC_PREPARE_SSE_USAGE(); \
1420 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
1421 } while (0)
1422
1423/**
1424 * Calls a SSE assembly implementation taking three visible arguments.
1425 *
1426 * @param a_pfnAImpl Pointer to the assembly SSE routine.
1427 * @param a0 The first extra argument.
1428 * @param a1 The second extra argument.
1429 * @param a2 The third extra argument.
1430 */
1431#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
1432 do { \
1433 IEM_MC_PREPARE_SSE_USAGE(); \
1434 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
1435 } while (0)
1436
1437
1438/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
1439 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
1440#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
1441 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, &pVCpu->cpum.GstCtx.XState, 0)
1442
1443/**
1444 * Calls a AVX assembly implementation taking two visible arguments.
1445 *
1446 * There is one implicit zero'th argument, a pointer to the extended state.
1447 *
1448 * @param a_pfnAImpl Pointer to the assembly AVX routine.
1449 * @param a1 The first extra argument.
1450 * @param a2 The second extra argument.
1451 */
1452#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
1453 do { \
1454 IEM_MC_PREPARE_AVX_USAGE(); \
1455 a_pfnAImpl(pXState, (a1), (a2)); \
1456 } while (0)
1457
1458/**
1459 * Calls a AVX assembly implementation taking three visible arguments.
1460 *
1461 * There is one implicit zero'th argument, a pointer to the extended state.
1462 *
1463 * @param a_pfnAImpl Pointer to the assembly AVX routine.
1464 * @param a1 The first extra argument.
1465 * @param a2 The second extra argument.
1466 * @param a3 The third extra argument.
1467 */
1468#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
1469 do { \
1470 IEM_MC_PREPARE_AVX_USAGE(); \
1471 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
1472 } while (0)
1473
1474/** @note Not for IOPL or IF testing. */
1475#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
1476/** @note Not for IOPL or IF testing. */
1477#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
1478/** @note Not for IOPL or IF testing. */
1479#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
1480/** @note Not for IOPL or IF testing. */
1481#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
1482/** @note Not for IOPL or IF testing. */
1483#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
1484 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
1485 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
1486/** @note Not for IOPL or IF testing. */
1487#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
1488 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
1489 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
1490/** @note Not for IOPL or IF testing. */
1491#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
1492 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
1493 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
1494 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
1495/** @note Not for IOPL or IF testing. */
1496#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
1497 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
1498 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
1499 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
1500#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
1501#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
1502#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
1503/** @note Not for IOPL or IF testing. */
1504#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
1505 if ( pVCpu->cpum.GstCtx.cx != 0 \
1506 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
1507/** @note Not for IOPL or IF testing. */
1508#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
1509 if ( pVCpu->cpum.GstCtx.ecx != 0 \
1510 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
1511/** @note Not for IOPL or IF testing. */
1512#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
1513 if ( pVCpu->cpum.GstCtx.rcx != 0 \
1514 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
1515/** @note Not for IOPL or IF testing. */
1516#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
1517 if ( pVCpu->cpum.GstCtx.cx != 0 \
1518 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
1519/** @note Not for IOPL or IF testing. */
1520#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
1521 if ( pVCpu->cpum.GstCtx.ecx != 0 \
1522 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
1523/** @note Not for IOPL or IF testing. */
1524#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
1525 if ( pVCpu->cpum.GstCtx.rcx != 0 \
1526 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
1527#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
1528#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
1529
1530#define IEM_MC_REF_FPUREG(a_pr80Dst, a_iSt) \
1531 do { (a_pr80Dst) = &pVCpu->cpum.GstCtx.XState.x87.aRegs[X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW, a_iSt)].r80; } while (0)
1532#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
1533 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
1534#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
1535 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
1536#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
1537 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
1538#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
1539 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
1540#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
1541 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
1542#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
1543 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
1544#define IEM_MC_IF_FCW_IM() \
1545 if (pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_IM) {
1546#define IEM_MC_IF_MXCSR_XCPT_PENDING() \
1547 if (( ~((pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
1548 & (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_XCPT_FLAGS)) != 0) {
1549
1550#define IEM_MC_ELSE() } else {
1551#define IEM_MC_ENDIF() } do {} while (0)
1552
1553/** @} */
1554
1555#endif /* !VMM_INCLUDED_SRC_include_IEMMc_h */
1556
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette