source: vbox/trunk/src/VBox/VMM/include/IEMMc.h @ 108959 (last change: 108901 by vboxsync)
/* $Id: IEMMc.h 108901 2025-04-08 18:32:29Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - IEM_MC_XXX, common.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#ifndef VMM_INCLUDED_SRC_include_IEMMc_h
#define VMM_INCLUDED_SRC_include_IEMMc_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif


/** @name "Microcode" macros.
 *
 * The idea is that the same code should be usable both to interpret
 * instructions and to feed the recompilers; hence this obfuscation.
 *
 * There are target-specific "microcodes" in addition to the ones listed here.
 * The target-specific header may also override the definitions here to allow
 * for differences.
 *
 * @{
 */

#define IEM_MC_BEGIN(a_fMcFlags, a_fCImplFlags) {
#define IEM_MC_END() }


/** Dummy MC that prevents native recompilation. */
#define IEM_MC_NO_NATIVE_RECOMPILE() ((void)0)

/** Advances the PC, finishes the instruction and returns.
 * This may include raising debug exceptions and such. */
#define IEM_MC_ADVANCE_PC_AND_FINISH() return iemRegAddToPcAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu))


/** Sets PC, finishes the instruction and returns. */
#define IEM_MC_REL_JMP_S8_AND_FINISH(a_i8) return iemRegPcRelativeJumpS8AndFinishClearingRF(pVCpu, (a_i8))
/** Sets PC, finishes the instruction and returns. */
#define IEM_MC_REL_JMP_S16_AND_FINISH(a_i16) return iemRegPcRelativeJumpS16AndFinishClearingRF(pVCpu, (a_i16))
/** Sets PC, finishes the instruction and returns. */
#define IEM_MC_REL_JMP_S32_AND_FINISH(a_i32) return iemRegPcRelativeJumpS32AndFinishClearingRF(pVCpu, (a_i32))
/** Sets PC, finishes the instruction and returns. */
#define IEM_MC_IND_JMP_U16_AND_FINISH(a_u16NewIP) return iemRegPcJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP))
/** Sets PC, finishes the instruction and returns. */
#define IEM_MC_IND_JMP_U32_AND_FINISH(a_u32NewIP) return iemRegPcJumpU32AndFinishClearingRF((pVCpu), (a_u32NewIP))
/** Sets PC, finishes the instruction and returns. */
#define IEM_MC_IND_JMP_U64_AND_FINISH(a_u64NewIP) return iemRegPcJumpU64AndFinishClearingRF((pVCpu), (a_u64NewIP))

/** Saves the return address, sets PC, finishes the instruction and returns. */
#define IEM_MC_REL_CALL_S16_AND_FINISH(a_i16) return iemRegPcRelativeCallS16AndFinishClearingRF(pVCpu, (a_i16))
/** Saves the return address, sets PC, finishes the instruction and returns. */
#define IEM_MC_REL_CALL_S32_AND_FINISH(a_i32) return iemRegPcRelativeCallS32AndFinishClearingRF(pVCpu, (a_i32))
/** Saves the return address, sets PC, finishes the instruction and returns. */
#define IEM_MC_REL_CALL_S64_AND_FINISH(a_i64) return iemRegPcRelativeCallS64AndFinishClearingRF(pVCpu, (a_i64))
/** Saves the return address, sets PC, finishes the instruction and returns. */
#define IEM_MC_IND_CALL_U16_AND_FINISH(a_u16NewIP) return iemRegPcIndirectCallU16AndFinishClearingRF((pVCpu), (a_u16NewIP))
/** Saves the return address, sets PC, finishes the instruction and returns. */
#define IEM_MC_IND_CALL_U32_AND_FINISH(a_u32NewIP) return iemRegPcIndirectCallU32AndFinishClearingRF((pVCpu), (a_u32NewIP))
/** Saves the return address, sets PC, finishes the instruction and returns. */
#define IEM_MC_IND_CALL_U64_AND_FINISH(a_u64NewIP) return iemRegPcIndirectCallU64AndFinishClearingRF((pVCpu), (a_u64NewIP))
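
/*
 * Illustrative sketch (editorial, not part of the header): a conditional
 * relative jump typically finishes through one of the macros above, while
 * the not-taken path just advances the PC.  The condition macro named here
 * is made up for the example.
 *
 * @code
 *  IEM_MC_BEGIN(0, 0);
 *  IEM_MC_IF_SOME_CONDITION() {              // hypothetical condition MC
 *      IEM_MC_REL_JMP_S8_AND_FINISH(i8Disp); // taken: PC-relative jump, finish
 *  } IEM_MC_ELSE() {
 *      IEM_MC_ADVANCE_PC_AND_FINISH();       // not taken: step to next instruction
 *  } IEM_MC_ENDIF();
 *  IEM_MC_END();
 * @endcode
 */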


#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
#define IEM_MC_LOCAL_ASSIGN(a_Type, a_Name, a_Value) a_Type a_Name = (a_Value)
#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
#define IEM_MC_NOREF(a_Name) RT_NOREF_PV(a_Name) /* NOP/liveness hack */
#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)

/** ASSUMES the source variable is not used after this statement. */
#define IEM_MC_ASSIGN_TO_SMALLER(a_VarDst, a_VarSrcEol) (a_VarDst) = (a_VarSrcEol)
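
/*
 * Illustrative sketch: locals and arguments are declared up front and, in the
 * interpreter build, expand to plain C variables; the a_iArg ordinal only
 * matters to the recompiler.  The names below are made up.
 *
 * @code
 *  IEM_MC_LOCAL(uint16_t, u16Value);
 *  IEM_MC_ARG(uint16_t *, pu16Dst, 0);                      // argument no. 0
 *  IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Src, u16Value, 1);  // arg no. 1 = &u16Value
 * @endcode
 */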

#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_I16(a_i16Dst, a_iGReg) (a_i16Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_I32(a_i32Dst, a_iGReg) (a_i32Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
#define IEM_MC_FETCH_GREG_PAIR_U32(a_u64Dst, a_iGRegLo, a_iGRegHi) do { \
        (a_u64Dst).s.Lo = iemGRegFetchU32(pVCpu, (a_iGRegLo)); \
        (a_u64Dst).s.Hi = iemGRegFetchU32(pVCpu, (a_iGRegHi)); \
    } while (0)
#define IEM_MC_FETCH_GREG_PAIR_U64(a_u128Dst, a_iGRegLo, a_iGRegHi) do { \
        (a_u128Dst).s.Lo = iemGRegFetchU64(pVCpu, (a_iGRegLo)); \
        (a_u128Dst).s.Hi = iemGRegFetchU64(pVCpu, (a_iGRegHi)); \
    } while (0)
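
/*
 * Illustrative sketch: the ZX/SX variants widen a narrow register read into a
 * larger local, as a MOVZX/MOVSX style operation would need (the register
 * index is made up):
 *
 * @code
 *  IEM_MC_LOCAL(uint64_t, u64Tmp);
 *  IEM_MC_FETCH_GREG_U8_SX_U64(u64Tmp, iGRegSrc);  // sign-extend byte register to 64 bits
 * @endcode
 */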

/** @todo these zero-extend the result, which can be a bit confusing for
 * IEM_MC_STORE_GREG_I32... */
#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
#define IEM_MC_STORE_GREG_I32(a_iGReg, a_i32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_i32Value) /* clear high bits. */
#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
#define IEM_MC_STORE_GREG_I64(a_iGReg, a_i64Value) *iemGRegRefI64(pVCpu, (a_iGReg)) = (a_i64Value)
#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
#define IEM_MC_STORE_GREG_PAIR_U32(a_iGRegLo, a_iGRegHi, a_u64Value) do { \
        *iemGRegRefU64(pVCpu, (a_iGRegLo)) = (uint32_t)(a_u64Value).s.Lo; \
        *iemGRegRefU64(pVCpu, (a_iGRegHi)) = (uint32_t)(a_u64Value).s.Hi; \
    } while (0)
#define IEM_MC_STORE_GREG_PAIR_U64(a_iGRegLo, a_iGRegHi, a_u128Value) do { \
        *iemGRegRefU64(pVCpu, (a_iGRegLo)) = (uint64_t)(a_u128Value).s.Lo; \
        *iemGRegRefU64(pVCpu, (a_iGRegHi)) = (uint64_t)(a_u128Value).s.Hi; \
    } while (0)
#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
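
/*
 * Illustrative note: on x86-64 a 32-bit GPR write zero-extends into the full
 * 64-bit register, which is why the U32/I32 stores above go through the
 * 64-bit register reference with a (uint32_t) cast:
 *
 * @code
 *  IEM_MC_STORE_GREG_U32(iGRegDst, u32Result);  // bits 63:32 of the register are cleared
 * @endcode
 */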


#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U8_CONST(a_pu8Dst, a_iGReg) (a_pu8Dst) = (uint8_t const *)iemGRegRefU8(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U16_CONST(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t const *)iemGRegRefU16(pVCpu, (a_iGReg))
/** @todo X86: Users of IEM_MC_REF_GREG_U32 need to clear the high bits on
 * commit. Use IEM_MC_CLEAR_HIGH_GREG_U64! */
#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U32_CONST(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t const *)iemGRegRefU32(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_I32(a_pi32Dst, a_iGReg) (a_pi32Dst) = (int32_t *)iemGRegRefU32(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_I32_CONST(a_pi32Dst, a_iGReg) (a_pi32Dst) = (int32_t const *)iemGRegRefU32(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U64_CONST(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t const *)iemGRegRefU64(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_I64(a_pi64Dst, a_iGReg) (a_pi64Dst) = (int64_t *)iemGRegRefU64(pVCpu, (a_iGReg))
#define IEM_MC_REF_GREG_I64_CONST(a_pi64Dst, a_iGReg) (a_pi64Dst) = (int64_t const *)iemGRegRefU64(pVCpu, (a_iGReg))
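
/*
 * Illustrative sketch of the commit rule from the @todo above: after writing
 * through a 32-bit register reference, the high half must be cleared
 * explicitly (the worker name is made up):
 *
 * @code
 *  IEM_MC_REF_GREG_U32(pu32Dst, iGRegDst);
 *  IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_SomeOp_u32, pu32Dst, u32Src);  // hypothetical worker
 *  IEM_MC_CLEAR_HIGH_GREG_U64(iGRegDst);                            // zero bits 63:32 on commit
 * @endcode
 */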

#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
    do { /* Clears the high 32 bits of the register. */ \
        uint64_t * const pu64Reg = iemGRegRefU64(pVCpu, (a_iGReg)); \
        *pu64Reg = (uint32_t)((uint32_t)*pu64Reg + (a_u32Value)); \
    } while (0)
#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)

#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u8Const) \
    do { /* Clears the high 32 bits of the register. */ \
        uint64_t * const pu64Reg = iemGRegRefU64(pVCpu, (a_iGReg)); \
        *pu64Reg = (uint32_t)((uint32_t)*pu64Reg - (a_u8Const)); \
    } while (0)
#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u8Const) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u8Const)
#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= (a_u16Const); } while (0)

#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8(pVCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)

#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)

#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)

#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)

#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)

#define IEM_MC_SHR_LOCAL_U8(a_u8Local, a_cShift) do { (a_u8Local) >>= (a_cShift); } while (0)

#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)

#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)

#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)

#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
    do { /* Clears the high 32 bits of the register. */ \
        uint64_t * const pu64Reg = iemGRegRefU64(pVCpu, (a_iGReg)); \
        *pu64Reg = (uint32_t)((uint32_t)*pu64Reg & (a_u32Value)); \
    } while (0)
#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)

#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
    do { /* Clears the high 32 bits of the register. */ \
        uint64_t * const pu64Reg = iemGRegRefU64(pVCpu, (a_iGReg)); \
        *pu64Reg = (uint32_t)((uint32_t)*pu64Reg | (a_u32Value)); \
    } while (0)
#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)

#define IEM_MC_BSWAP_LOCAL_U16(a_u16Local) (a_u16Local) = RT_BSWAP_U16((a_u16Local));
#define IEM_MC_BSWAP_LOCAL_U32(a_u32Local) (a_u32Local) = RT_BSWAP_U32((a_u32Local));
#define IEM_MC_BSWAP_LOCAL_U64(a_u64Local) (a_u64Local) = RT_BSWAP_U64((a_u64Local));


#define IEM_MC_FETCH_MEM_SEG_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
    ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM16_SEG_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
    ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
#define IEM_MC_FETCH_MEM32_SEG_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
    ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))

#define IEM_MC_FETCH_MEM_FLAT_U8(a_u8Dst, a_GCPtrMem) \
    ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM16_FLAT_U8(a_u8Dst, a_GCPtrMem16) \
    ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem16)))
#define IEM_MC_FETCH_MEM32_FLAT_U8(a_u8Dst, a_GCPtrMem32) \
    ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem32)))
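
/*
 * Illustrative sketch: the _SEG_ fetches go through x86 segmentation via
 * a_iSeg, while the _FLAT_ fetches assume a flat address space and take a
 * linear address directly:
 *
 * @code
 *  IEM_MC_FETCH_MEM_SEG_U8(u8Value, X86_SREG_DS, GCPtrEffSrc);  // DS:GCPtrEffSrc
 *  IEM_MC_FETCH_MEM_FLAT_U8(u8Value, GCPtrEffSrc);              // linear address
 * @endcode
 */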

#define IEM_MC_FETCH_MEM_SEG_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
    ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_SEG_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
#define IEM_MC_FETCH_MEM_SEG_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
    ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_SEG_I16_DISP(a_i16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))

#define IEM_MC_FETCH_MEM_FLAT_U16(a_u16Dst, a_GCPtrMem) \
    ((a_u16Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_FLAT_U16_DISP(a_u16Dst, a_GCPtrMem, a_offDisp) \
    ((a_u16Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
#define IEM_MC_FETCH_MEM_FLAT_I16(a_i16Dst, a_GCPtrMem) \
    ((a_i16Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_FLAT_I16_DISP(a_i16Dst, a_GCPtrMem, a_offDisp) \
    ((a_i16Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))

#define IEM_MC_FETCH_MEM_SEG_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_SEG_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
#define IEM_MC_FETCH_MEM_SEG_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
    ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_SEG_I32_DISP(a_i32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))

#define IEM_MC_FETCH_MEM_FLAT_U32(a_u32Dst, a_GCPtrMem) \
    ((a_u32Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_FLAT_U32_DISP(a_u32Dst, a_GCPtrMem, a_offDisp) \
    ((a_u32Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
#define IEM_MC_FETCH_MEM_FLAT_I32(a_i32Dst, a_GCPtrMem) \
    ((a_i32Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_FLAT_I32_DISP(a_i32Dst, a_GCPtrMem, a_offDisp) \
    ((a_i32Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))

#define IEM_MC_FETCH_MEM_SEG_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_SEG_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
#define IEM_MC_FETCH_MEM_SEG_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
    ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_SEG_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
    ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))

#define IEM_MC_FETCH_MEM_FLAT_U64(a_u64Dst, a_GCPtrMem) \
    ((a_u64Dst) = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_FLAT_U64_DISP(a_u64Dst, a_GCPtrMem, a_offDisp) \
    ((a_u64Dst) = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
#define IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128(a_u64Dst, a_GCPtrMem) \
    ((a_u64Dst) = iemMemFlatFetchDataU64AlignedU128Jmp(pVCpu, (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_FLAT_I64(a_i64Dst, a_GCPtrMem) \
    ((a_i64Dst) = (int64_t)iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))

#define IEM_MC_FETCH_MEM_SEG_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
    ((a_r32Dst).u = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_SEG_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
    ((a_r64Dst).u = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))

#define IEM_MC_FETCH_MEM_FLAT_R32(a_r32Dst, a_GCPtrMem) \
    ((a_r32Dst).u = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_FLAT_R64(a_r64Dst, a_GCPtrMem) \
    ((a_r64Dst).u = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))

#define IEM_MC_FETCH_MEM_SEG_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
    iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
#define IEM_MC_FETCH_MEM_SEG_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \
    iemMemFetchDataU128NoAcJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
#define IEM_MC_FETCH_MEM_SEG_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
    iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))

#define IEM_MC_FETCH_MEM_FLAT_U128(a_u128Dst, a_GCPtrMem) \
    iemMemFlatFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
#define IEM_MC_FETCH_MEM_FLAT_U128_NO_AC(a_u128Dst, a_GCPtrMem) \
    iemMemFlatFetchDataU128NoAcJmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
#define IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE(a_u128Dst, a_GCPtrMem) \
    iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))

#define IEM_MC_FETCH_MEM_SEG_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
    iemMemFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
#define IEM_MC_FETCH_MEM_SEG_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \
    iemMemFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
#define IEM_MC_FETCH_MEM_SEG_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
    iemMemFetchDataU256AlignedAvxJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))

#define IEM_MC_FETCH_MEM_FLAT_U256(a_u256Dst, a_GCPtrMem) \
    iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem))
#define IEM_MC_FETCH_MEM_FLAT_U256_NO_AC(a_u256Dst, a_GCPtrMem) \
    iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem))
#define IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX(a_u256Dst, a_GCPtrMem) \
    iemMemFlatFetchDataU256AlignedAvxJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem))


#define IEM_MC_FETCH_MEM_SEG_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
    ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_SEG_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_SEG_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_SEG_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_SEG_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_SEG_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))

#define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16(a_u16Dst, a_GCPtrMem) \
    ((a_u16Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32(a_u32Dst, a_GCPtrMem) \
    ((a_u32Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64(a_u64Dst, a_GCPtrMem) \
    ((a_u64Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32(a_u32Dst, a_GCPtrMem) \
    ((a_u32Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64(a_u64Dst, a_GCPtrMem) \
    ((a_u64Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64(a_u64Dst, a_GCPtrMem) \
    ((a_u64Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))

#define IEM_MC_FETCH_MEM_SEG_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
    ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_SEG_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_SEG_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_SEG_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_SEG_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_SEG_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))

#define IEM_MC_FETCH_MEM_FLAT_U8_SX_U16(a_u16Dst, a_GCPtrMem) \
    ((a_u16Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_FLAT_U8_SX_U32(a_u32Dst, a_GCPtrMem) \
    ((a_u32Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_FLAT_U8_SX_U64(a_u64Dst, a_GCPtrMem) \
    ((a_u64Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_FLAT_U16_SX_U32(a_u32Dst, a_GCPtrMem) \
    ((a_u32Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_FLAT_U16_SX_U64(a_u64Dst, a_GCPtrMem) \
    ((a_u64Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_FLAT_U32_SX_U64(a_u64Dst, a_GCPtrMem) \
    ((a_u64Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
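
/*
 * Illustrative sketch: the memory ZX/SX fetches mirror the register variants,
 * e.g. loading a word and widening it to 64 bits:
 *
 * @code
 *  IEM_MC_FETCH_MEM_SEG_U16_ZX_U64(u64Tmp, X86_SREG_DS, GCPtrEffSrc);  // zero-extend
 *  IEM_MC_FETCH_MEM_SEG_U16_SX_U64(u64Tmp, X86_SREG_DS, GCPtrEffSrc);  // sign-extend
 * @endcode
 */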

#define IEM_MC_STORE_MEM_SEG_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
    iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
#define IEM_MC_STORE_MEM_SEG_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
    iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
#define IEM_MC_STORE_MEM_SEG_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
    iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
#define IEM_MC_STORE_MEM_SEG_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
    iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))

#define IEM_MC_STORE_MEM_FLAT_U8(a_GCPtrMem, a_u8Value) \
    iemMemFlatStoreDataU8Jmp(pVCpu, (a_GCPtrMem), (a_u8Value))
#define IEM_MC_STORE_MEM_FLAT_U16(a_GCPtrMem, a_u16Value) \
    iemMemFlatStoreDataU16Jmp(pVCpu, (a_GCPtrMem), (a_u16Value))
#define IEM_MC_STORE_MEM_FLAT_U32(a_GCPtrMem, a_u32Value) \
    iemMemFlatStoreDataU32Jmp(pVCpu, (a_GCPtrMem), (a_u32Value))
#define IEM_MC_STORE_MEM_FLAT_U64(a_GCPtrMem, a_u64Value) \
    iemMemFlatStoreDataU64Jmp(pVCpu, (a_GCPtrMem), (a_u64Value))

#define IEM_MC_STORE_MEM_SEG_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
    iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
#define IEM_MC_STORE_MEM_SEG_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
    iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
#define IEM_MC_STORE_MEM_SEG_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
    iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
#define IEM_MC_STORE_MEM_SEG_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
    iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))

#define IEM_MC_STORE_MEM_FLAT_U8_CONST(a_GCPtrMem, a_u8C) \
    iemMemFlatStoreDataU8Jmp(pVCpu, (a_GCPtrMem), (a_u8C))
#define IEM_MC_STORE_MEM_FLAT_U16_CONST(a_GCPtrMem, a_u16C) \
    iemMemFlatStoreDataU16Jmp(pVCpu, (a_GCPtrMem), (a_u16C))
#define IEM_MC_STORE_MEM_FLAT_U32_CONST(a_GCPtrMem, a_u32C) \
    iemMemFlatStoreDataU32Jmp(pVCpu, (a_GCPtrMem), (a_u32C))
#define IEM_MC_STORE_MEM_FLAT_U64_CONST(a_GCPtrMem, a_u64C) \
    iemMemFlatStoreDataU64Jmp(pVCpu, (a_GCPtrMem), (a_u64C))

#define IEM_MC_STORE_MEM_BY_REF_I8_CONST(a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
#define IEM_MC_STORE_MEM_BY_REF_I16_CONST(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
#define IEM_MC_STORE_MEM_BY_REF_I32_CONST(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
#define IEM_MC_STORE_MEM_BY_REF_I64_CONST(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
#define IEM_MC_STORE_MEM_BY_REF_R32_NEG_QNAN(a_pr32Dst) (a_pr32Dst)->u = UINT32_C(0xffc00000)
#define IEM_MC_STORE_MEM_BY_REF_R64_NEG_QNAN(a_pr64Dst) (a_pr64Dst)->u = UINT64_C(0xfff8000000000000)
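
/*
 * Illustrative note: the NEG_QNAN stores write the default negative quiet NaN
 * that an x86 FPU/SSE unit produces for invalid operations (sign bit set, top
 * mantissa bit set, remaining mantissa bits zero):
 *
 * @code
 *  IEM_MC_STORE_MEM_BY_REF_R32_NEG_QNAN(pr32Dst);  // pr32Dst->u = 0xffc00000
 * @endcode
 */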

#define IEM_MC_STORE_MEM_SEG_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
    iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value))
#define IEM_MC_STORE_MEM_SEG_U128_NO_AC(a_iSeg, a_GCPtrMem, a_u128Value) \
    iemMemStoreDataU128NoAcJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value))

#define IEM_MC_STORE_MEM_FLAT_U128(a_GCPtrMem, a_u128Value) \
    iemMemFlatStoreDataU128Jmp(pVCpu, (a_GCPtrMem), &(a_u128Value))
#define IEM_MC_STORE_MEM_FLAT_U128_NO_AC(a_GCPtrMem, a_u128Value) \
    iemMemFlatStoreDataU128NoAcJmp(pVCpu, (a_GCPtrMem), &(a_u128Value))

#define IEM_MC_STORE_MEM_SEG_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
    iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
#define IEM_MC_STORE_MEM_SEG_U256_NO_AC(a_iSeg, a_GCPtrMem, a_u256Value) \
    iemMemStoreDataU256NoAcJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))

#define IEM_MC_STORE_MEM_FLAT_U256(a_GCPtrMem, a_u256Value) \
    iemMemFlatStoreDataU256Jmp(pVCpu, (a_GCPtrMem), &(a_u256Value))
#define IEM_MC_STORE_MEM_FLAT_U256_NO_AC(a_GCPtrMem, a_u256Value) \
    iemMemFlatStoreDataU256NoAcJmp(pVCpu, (a_GCPtrMem), &(a_u256Value))


/* 8-bit */

/**
 * Maps guest memory for byte atomic read+write direct (or bounce) buffer
 * access, for atomic operations.
 *
 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
 */
#define IEM_MC_MEM_SEG_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pu8Mem) = iemMemMapDataU8AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/**
 * Maps guest memory for byte read+write direct (or bounce) buffer access.
 *
 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
 */
#define IEM_MC_MEM_SEG_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pu8Mem) = iemMemMapDataU8RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/**
 * Maps guest memory for byte writeonly direct (or bounce) buffer access.
 *
 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
 */
#define IEM_MC_MEM_SEG_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pu8Mem) = iemMemMapDataU8WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/**
 * Maps guest memory for byte readonly direct (or bounce) buffer access.
 *
 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
 */
#define IEM_MC_MEM_SEG_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pu8Mem) = iemMemMapDataU8RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/**
 * Maps guest memory for byte atomic read+write direct (or bounce) buffer
 * access, flat address variant.
 *
 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
 */
#define IEM_MC_MEM_FLAT_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pu8Mem) = iemMemFlatMapDataU8AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))

/**
 * Maps guest memory for byte read+write direct (or bounce) buffer access, flat
 * address variant.
 *
 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
 */
#define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pu8Mem) = iemMemFlatMapDataU8RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))

/**
 * Maps guest memory for byte writeonly direct (or bounce) buffer access, flat
 * address variant.
 *
 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
 */
#define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pu8Mem) = iemMemFlatMapDataU8WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))

/**
 * Maps guest memory for byte readonly direct (or bounce) buffer access, flat
 * address variant.
 *
 * @param[out] a_pu8Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
 */
#define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pu8Mem) = iemMemFlatMapDataU8RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
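
/*
 * Illustrative sketch of the mapping pattern: map, modify through the
 * returned pointer, then commit+unmap (see the commit macros further down).
 * This is roughly what a read-modify-write byte instruction does; the
 * modification shown is made up:
 *
 * @code
 *  IEM_MC_MEM_SEG_MAP_U8_RW(pu8Dst, bUnmapInfo, X86_SREG_DS, GCPtrEffDst);
 *  *pu8Dst ^= 0xff;                              // hypothetical modification
 *  IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo);   // write back and release
 * @endcode
 */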


/* 16-bit */

/**
 * Maps guest memory for word atomic read+write direct (or bounce) buffer access.
 *
 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
 */
#define IEM_MC_MEM_SEG_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pu16Mem) = iemMemMapDataU16AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/**
 * Maps guest memory for word read+write direct (or bounce) buffer access.
 *
 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
 */
#define IEM_MC_MEM_SEG_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pu16Mem) = iemMemMapDataU16RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/**
 * Maps guest memory for word writeonly direct (or bounce) buffer access.
 *
 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
 */
#define IEM_MC_MEM_SEG_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pu16Mem) = iemMemMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/**
 * Maps guest memory for word readonly direct (or bounce) buffer access.
 *
 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
 */
#define IEM_MC_MEM_SEG_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pu16Mem) = iemMemMapDataU16RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/**
 * Maps guest memory for word atomic read+write direct (or bounce) buffer
 * access, flat address variant.
 *
 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
 */
#define IEM_MC_MEM_FLAT_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pu16Mem) = iemMemFlatMapDataU16AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))

/**
 * Maps guest memory for word read+write direct (or bounce) buffer access, flat
 * address variant.
 *
 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
 */
#define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pu16Mem) = iemMemFlatMapDataU16RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))

/**
 * Maps guest memory for word writeonly direct (or bounce) buffer access, flat
 * address variant.
 *
 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
 */
#define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pu16Mem) = iemMemFlatMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))

/**
 * Maps guest memory for word readonly direct (or bounce) buffer access, flat
 * address variant.
 *
 * @param[out] a_pu16Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
 */
#define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pu16Mem) = iemMemFlatMapDataU16RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))

/** int16_t alias. */
#define IEM_MC_MEM_SEG_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pi16Mem) = (int16_t *)iemMemMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/** Flat int16_t alias. */
#define IEM_MC_MEM_FLAT_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pi16Mem) = (int16_t *)iemMemFlatMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))


/* 32-bit */

/**
 * Maps guest memory for dword atomic read+write direct (or bounce) buffer access.
 *
 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
 */
#define IEM_MC_MEM_SEG_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pu32Mem) = iemMemMapDataU32AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/**
 * Maps guest memory for dword read+write direct (or bounce) buffer access.
 *
 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
 */
#define IEM_MC_MEM_SEG_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pu32Mem) = iemMemMapDataU32RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/**
 * Maps guest memory for dword writeonly direct (or bounce) buffer access.
 *
 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
 */
#define IEM_MC_MEM_SEG_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pu32Mem) = iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/**
 * Maps guest memory for dword readonly direct (or bounce) buffer access.
 *
 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
 */
#define IEM_MC_MEM_SEG_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pu32Mem) = iemMemMapDataU32RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/**
 * Maps guest memory for dword atomic read+write direct (or bounce) buffer
 * access, flat address variant.
 *
 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
 */
#define IEM_MC_MEM_FLAT_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pu32Mem) = iemMemFlatMapDataU32AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))

/**
 * Maps guest memory for dword read+write direct (or bounce) buffer access,
 * flat address variant.
 *
 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
 */
#define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pu32Mem) = iemMemFlatMapDataU32RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))

/**
 * Maps guest memory for dword writeonly direct (or bounce) buffer access, flat
 * address variant.
 *
 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
 */
#define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pu32Mem) = iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))

/**
 * Maps guest memory for dword readonly direct (or bounce) buffer access, flat
 * address variant.
 *
 * @param[out] a_pu32Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
 */
#define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pu32Mem) = iemMemFlatMapDataU32RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))

/** int32_t alias. */
#define IEM_MC_MEM_SEG_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pi32Mem) = (int32_t *)iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/** Flat int32_t alias. */
#define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pi32Mem) = (int32_t *)iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))

/** RTFLOAT32U alias. */
#define IEM_MC_MEM_SEG_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pr32Mem) = (PRTFLOAT32U)iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/** Flat RTFLOAT32U alias. */
#define IEM_MC_MEM_FLAT_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pr32Mem) = (PRTFLOAT32U)iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))


/* 64-bit */

/**
 * Maps guest memory for qword atomic read+write direct (or bounce) buffer access.
 *
 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
 */
#define IEM_MC_MEM_SEG_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pu64Mem) = iemMemMapDataU64AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/**
 * Maps guest memory for qword read+write direct (or bounce) buffer access.
 *
 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
 */
#define IEM_MC_MEM_SEG_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pu64Mem) = iemMemMapDataU64RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/**
 * Maps guest memory for qword writeonly direct (or bounce) buffer access.
 *
 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
 */
#define IEM_MC_MEM_SEG_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pu64Mem) = iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/**
 * Maps guest memory for qword readonly direct (or bounce) buffer access.
 *
 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
 */
#define IEM_MC_MEM_SEG_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pu64Mem) = iemMemMapDataU64RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/**
 * Maps guest memory for qword atomic read+write direct (or bounce) buffer
 * access, flat address variant.
 *
 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
 */
#define IEM_MC_MEM_FLAT_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pu64Mem) = iemMemFlatMapDataU64AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))

/**
 * Maps guest memory for qword read+write direct (or bounce) buffer access,
 * flat address variant.
 *
 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
 */
#define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pu64Mem) = iemMemFlatMapDataU64RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))

/**
 * Maps guest memory for qword writeonly direct (or bounce) buffer access, flat
 * address variant.
 *
 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
 */
#define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pu64Mem) = iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))

/**
 * Maps guest memory for qword readonly direct (or bounce) buffer access, flat
 * address variant.
 *
 * @param[out] a_pu64Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
 */
#define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pu64Mem) = iemMemFlatMapDataU64RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))

/** int64_t alias. */
#define IEM_MC_MEM_SEG_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pi64Mem) = (int64_t *)iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/** Flat int64_t alias. */
#define IEM_MC_MEM_FLAT_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pi64Mem) = (int64_t *)iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))

/** RTFLOAT64U alias. */
#define IEM_MC_MEM_SEG_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pr64Mem) = (PRTFLOAT64U)iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/** Flat RTFLOAT64U alias. */
#define IEM_MC_MEM_FLAT_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pr64Mem) = (PRTFLOAT64U)iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))


/* 128-bit */

/**
 * Maps guest memory for dqword atomic read+write direct (or bounce) buffer access.
 *
 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
 */
#define IEM_MC_MEM_SEG_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pu128Mem) = iemMemMapDataU128AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/**
 * Maps guest memory for dqword read+write direct (or bounce) buffer access.
 *
 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
 */
#define IEM_MC_MEM_SEG_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pu128Mem) = iemMemMapDataU128RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/**
 * Maps guest memory for dqword writeonly direct (or bounce) buffer access.
 *
 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
 */
#define IEM_MC_MEM_SEG_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pu128Mem) = iemMemMapDataU128WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/**
 * Maps guest memory for dqword readonly direct (or bounce) buffer access.
 *
 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX!
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
 */
#define IEM_MC_MEM_SEG_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    (a_pu128Mem) = iemMemMapDataU128RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))

/**
 * Maps guest memory for dqword atomic read+write direct (or bounce) buffer
 * access, flat address variant.
 *
 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
 */
#define IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pu128Mem) = iemMemFlatMapDataU128AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))

/**
 * Maps guest memory for dqword read+write direct (or bounce) buffer access,
 * flat address variant.
 *
 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW
 */
#define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pu128Mem) = iemMemFlatMapDataU128RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))

/**
 * Maps guest memory for dqword writeonly direct (or bounce) buffer access,
 * flat address variant.
 *
 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
 */
#define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pu128Mem) = iemMemFlatMapDataU128WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))

/**
 * Maps guest memory for dqword readonly direct (or bounce) buffer access, flat
 * address variant.
 *
 * @param[out] a_pu128Mem Where to return the pointer to the mapping.
 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
 * @param[in] a_GCPtrMem The memory address.
 * @remarks Will return/long jump on errors.
 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO
 */
#define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
    (a_pu128Mem) = iemMemFlatMapDataU128RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))


1027/* commit + unmap */
1028
1029/** Commits the memory and unmaps guest memory previously mapped RW.
1030 * @remarks May return.
1031 * @note Implictly frees the a_bMapInfo variable.
1032 */
1033#define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_bMapInfo) iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo))
1034
1035/** Commits the memory and unmaps guest memory previously mapped ATOMIC.
1036 * @remarks May return.
1037 * @note Implictly frees the a_bMapInfo variable.
1038 */
1039#define IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(a_bMapInfo) iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo))
1040
1041/** Commits the memory and unmaps guest memory previously mapped W.
1042 * @remarks May return.
1043 * @note Implictly frees the a_bMapInfo variable.
1044 */
1045#define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_bMapInfo) iemMemCommitAndUnmapWoJmp(pVCpu, (a_bMapInfo))
1046
1047/** Commits the memory and unmaps guest memory previously mapped R.
1048 * @remarks May return.
1049 * @note Implictly frees the a_bMapInfo variable.
1050 */
1051#define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_bMapInfo) iemMemCommitAndUnmapRoJmp(pVCpu, (a_bMapInfo))
1052
1053
1054/** Rolls back (conceptually only, assumes no writes) and unmaps the guest memory.
1055 * @note Implicitly frees the a_bMapInfo variable. */
1056#define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_bMapInfo) iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo)
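
/**
 * Example (illustrative sketch only): the typical map/commit pairing for a
 * read+write dqword access.  The locals and the iemAImpl_myop_u128 helper are
 * hypothetical, and real microcode declares such locals via IEM_MC_LOCAL /
 * IEM_MC_ARG rather than plain C:
 * @code
 *      uint8_t     bUnmapInfo;
 *      PRTUINT128U pu128Dst;
 *      IEM_MC_MEM_FLAT_MAP_U128_RW(pu128Dst, bUnmapInfo, GCPtrEffDst);
 *      IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_myop_u128, pu128Dst);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo);
 * @endcode
 * Write-only mappings pair the same way with the _WO variants, while
 * IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO serves error paths that bail out before
 * anything was written.
 */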
1057
1058
1059
1060/** The @a a_fSupportedHosts mask is made up of ORed together RT_ARCH_VAL_XXX values. */
1061#define IEM_MC_NATIVE_IF(a_fSupportedHosts) if (false) {
1062#define IEM_MC_NATIVE_ELSE() } else {
1063#define IEM_MC_NATIVE_ENDIF() } ((void)0)
1064
1065#define IEM_MC_NATIVE_EMIT_0(a_fnEmitter)
1066#define IEM_MC_NATIVE_EMIT_1(a_fnEmitter, a0) (void)(a0)
1067#define IEM_MC_NATIVE_EMIT_2(a_fnEmitter, a0, a1) (void)(a0), (void)(a1)
1068#define IEM_MC_NATIVE_EMIT_2_EX(a_fnEmitter, a0, a1) (void)(a0), (void)(a1)
1069#define IEM_MC_NATIVE_EMIT_3(a_fnEmitter, a0, a1, a2) (void)(a0), (void)(a1), (void)(a2)
1070#define IEM_MC_NATIVE_EMIT_4(a_fnEmitter, a0, a1, a2, a3) (void)(a0), (void)(a1), (void)(a2), (void)(a3)
1071#define IEM_MC_NATIVE_EMIT_5(a_fnEmitter, a0, a1, a2, a3, a4) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4)
1072#define IEM_MC_NATIVE_EMIT_6(a_fnEmitter, a0, a1, a2, a3, a4, a5) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5)
1073#define IEM_MC_NATIVE_EMIT_7(a_fnEmitter, a0, a1, a2, a3, a4, a5, a6) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5), (void)(a6)
1074#define IEM_MC_NATIVE_EMIT_8(a_fnEmitter, a0, a1, a2, a3, a4, a5, a6, a7) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5), (void)(a6), (void)(a7)
1075
1076/** This can be used to direct the register allocator when dealing with
1077 * x86/AMD64 instructions (like SHL reg,CL) that take fixed registers. */
1078#define IEM_MC_NATIVE_SET_AMD64_HOST_REG_FOR_LOCAL(a_VarNm, a_idxHostReg) ((void)0)
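
/**
 * Example (illustrative sketch only): select a native emitter on supported
 * hosts and fall back to the regular helper call elsewhere.  The
 * iemNativeEmit_myop and iemAImpl_myop_u64 names are hypothetical:
 * @code
 *      IEM_MC_NATIVE_IF(RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64)
 *          IEM_MC_NATIVE_EMIT_1(iemNativeEmit_myop, u64Local);
 *      IEM_MC_NATIVE_ELSE()
 *          IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_myop_u64, pu64Dst);
 *      IEM_MC_NATIVE_ENDIF();
 * @endcode
 * When interpreting, the IF branch is dead code (if (false)) and only the
 * ELSE branch runs; the native recompiler instead picks the emitter branch
 * on matching hosts.
 */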
1079
1080
1081#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
1082#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
1083#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
1084#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
1085#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
1086#define IEM_MC_CALL_AIMPL_3(a_rcType, a_rc, a_pfn, a0, a1, a2) a_rcType const a_rc = (a_pfn)((a0), (a1), (a2))
1087#define IEM_MC_CALL_AIMPL_4(a_rcType, a_rc, a_pfn, a0, a1, a2, a3) a_rcType const a_rc = (a_pfn)((a0), (a1), (a2), (a3))
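
/**
 * Example (illustrative sketch only; the iemAImpl_xxx helpers and their
 * signatures are hypothetical):
 * @code
 *      IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_myop_u64, pu64Dst, u64Src);
 *      IEM_MC_CALL_AIMPL_3(int32_t, rc, iemAImpl_mydiv_u64, pu64Quot, pu64Rem, u64Divisor);
 * @endcode
 * The value-returning variants declare @a a_rc as a const local holding the
 * helper's return value, so it can be tested afterwards, e.g. with
 * IEM_MC_IF_LOCAL_IS_Z.
 */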
1088
1089
1090/** @def IEM_MC_CALL_CIMPL_HLP_RET
1091 * Helper macro for checking that all important IEM_CIMPL_F_XXX bits are set.
1092 */
1093#if defined(VBOX_STRICT) && defined(VBOX_VMM_TARGET_X86)
1094# define IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, a_CallExpr) \
1095 do { \
1096 uint8_t const cbInstr = IEM_GET_INSTR_LEN(pVCpu); /* may be flushed */ \
1097 uint16_t const uCsBefore = pVCpu->cpum.GstCtx.cs.Sel; \
1098 uint64_t const uRipBefore = pVCpu->cpum.GstCtx.rip; \
1099 uint32_t const fEflBefore = pVCpu->cpum.GstCtx.eflags.u; \
1100 uint32_t const fExecBefore = pVCpu->iem.s.fExec; \
1101 VBOXSTRICTRC const rcStrictHlp = a_CallExpr; \
1102 if (rcStrictHlp == VINF_SUCCESS) \
1103 { \
1104 uint64_t const fRipMask = (pVCpu->iem.s.fExec & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_64BIT ? UINT64_MAX : UINT32_MAX; \
1105 AssertMsg( ((a_fFlags) & IEM_CIMPL_F_BRANCH_ANY) \
1106 || ( ((uRipBefore + cbInstr) & fRipMask) == pVCpu->cpum.GstCtx.rip \
1107 && uCsBefore == pVCpu->cpum.GstCtx.cs.Sel) \
1108 || ( ((a_fFlags) & IEM_CIMPL_F_REP) \
1109 && uRipBefore == pVCpu->cpum.GstCtx.rip \
1110 && uCsBefore == pVCpu->cpum.GstCtx.cs.Sel), \
1111 ("CS:RIP=%04x:%08RX64 + %x -> %04x:%08RX64, expected %04x:%08RX64\n", uCsBefore, uRipBefore, cbInstr, \
1112 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uCsBefore, (uRipBefore + cbInstr) & fRipMask)); \
1113 if ((a_fFlags) & IEM_CIMPL_F_RFLAGS) \
1114 { /* No need to check fEflBefore */ Assert(!((a_fFlags) & IEM_CIMPL_F_STATUS_FLAGS)); } \
1115 else if ((a_fFlags) & IEM_CIMPL_F_STATUS_FLAGS) \
1116 AssertMsg( (pVCpu->cpum.GstCtx.eflags.u & ~(X86_EFL_STATUS_BITS | X86_EFL_RF)) \
1117 == (fEflBefore & ~(X86_EFL_STATUS_BITS | X86_EFL_RF)), \
1118 ("EFL=%#RX32 -> %#RX32\n", fEflBefore, pVCpu->cpum.GstCtx.eflags.u)); \
1119 else \
1120 AssertMsg( (pVCpu->cpum.GstCtx.eflags.u & ~(X86_EFL_RF)) \
1121 == (fEflBefore & ~(X86_EFL_RF)), \
1122 ("EFL=%#RX32 -> %#RX32\n", fEflBefore, pVCpu->cpum.GstCtx.eflags.u)); \
1123 if (!((a_fFlags) & IEM_CIMPL_F_MODE)) \
1124 { \
1125 uint32_t fExecRecalc = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS); \
1126 AssertMsg( fExecBefore == fExecRecalc \
1127 /* in case ES, DS or SS was external initially (happens a lot with HM): */ \
1128 || ( fExecBefore == (fExecRecalc & ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK) \
1129 && (fExecRecalc & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_32BIT), \
1130 ("fExec=%#x -> %#x (diff %#x)\n", fExecBefore, fExecRecalc, fExecBefore ^ fExecRecalc)); \
1131 } \
1132 } \
1133 return rcStrictHlp; \
1134 } while (0)
1135#else
1136# define IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, a_CallExpr) return a_CallExpr
1137#endif
1138
1139/**
1140 * Defers the rest of the instruction emulation to a C implementation routine
1141 * and returns, only taking the standard parameters.
1142 *
1143 * @param a_fFlags IEM_CIMPL_F_XXX.
1144 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
1145 * in the native recompiler.
1146 * @param a_pfnCImpl The pointer to the C routine.
1147 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
1148 */
1149#ifdef IEM_CIMPL_NEEDS_INSTR_LEN
1150# define IEM_MC_CALL_CIMPL_0(a_fFlags, a_fGstShwFlush, a_pfnCImpl) \
1151 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu)))
1152#else
1153# define IEM_MC_CALL_CIMPL_0(a_fFlags, a_fGstShwFlush, a_pfnCImpl) \
1154 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu))
1155#endif
1156
1157/**
1158 * Defers the rest of instruction emulation to a C implementation routine and
1159 * returns, taking one argument in addition to the standard ones.
1160 *
1161 * @param a_fFlags IEM_CIMPL_F_XXX.
1162 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
1163 * in the native recompiler.
1164 * @param a_pfnCImpl The pointer to the C routine.
1165 * @param a0 The argument.
1166 */
1167#ifdef IEM_CIMPL_NEEDS_INSTR_LEN
1168# define IEM_MC_CALL_CIMPL_1(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
1169 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0))
1170#else
1171# define IEM_MC_CALL_CIMPL_1(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
1172 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, a0))
1173#endif
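
/**
 * Example (illustrative sketch only; iemCImpl_myop stands for a hypothetical
 * routine defined with IEM_CIMPL_DEF_1):
 * @code
 *      IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_MODE, 0, iemCImpl_myop, u16Value);
 * @endcode
 * Passing 0 for @a a_fGstShwFlush indicates that no guest register shadow
 * copies need flushing in the native recompiler.
 */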
1174
1175/**
1176 * Defers the rest of the instruction emulation to a C implementation routine
1177 * and returns, taking two arguments in addition to the standard ones.
1178 *
1179 * @param a_fFlags IEM_CIMPL_F_XXX.
1180 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
1181 * in the native recompiler.
1182 * @param a_pfnCImpl The pointer to the C routine.
1183 * @param a0 The first extra argument.
1184 * @param a1 The second extra argument.
1185 */
1186#ifdef IEM_CIMPL_NEEDS_INSTR_LEN
1187# define IEM_MC_CALL_CIMPL_2(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
1188 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1))
1189#else
1190# define IEM_MC_CALL_CIMPL_2(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
1191 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, a0, a1))
1192#endif
1193
1194/**
1195 * Defers the rest of the instruction emulation to a C implementation routine
1196 * and returns, taking three arguments in addition to the standard ones.
1197 *
1198 * @param a_fFlags IEM_CIMPL_F_XXX.
1199 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
1200 * in the native recompiler.
1201 * @param a_pfnCImpl The pointer to the C routine.
1202 * @param a0 The first extra argument.
1203 * @param a1 The second extra argument.
1204 * @param a2 The third extra argument.
1205 */
1206#ifdef IEM_CIMPL_NEEDS_INSTR_LEN
1207# define IEM_MC_CALL_CIMPL_3(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
1208 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2))
1209#else
1210# define IEM_MC_CALL_CIMPL_3(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
1211 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, a0, a1, a2))
1212#endif
1213
1214/**
1215 * Defers the rest of the instruction emulation to a C implementation routine
1216 * and returns, taking four arguments in addition to the standard ones.
1217 *
1218 * @param a_fFlags IEM_CIMPL_F_XXX.
1219 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
1220 * in the native recompiler.
1221 * @param a_pfnCImpl The pointer to the C routine.
1222 * @param a0 The first extra argument.
1223 * @param a1 The second extra argument.
1224 * @param a2 The third extra argument.
1225 * @param a3 The fourth extra argument.
1226 */
1227#ifdef IEM_CIMPL_NEEDS_INSTR_LEN
1228# define IEM_MC_CALL_CIMPL_4(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3) \
1229 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3))
1230#else
1231# define IEM_MC_CALL_CIMPL_4(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3) \
1232 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, a0, a1, a2, a3))
1233#endif
1234
1235/**
1236 * Defers the rest of the instruction emulation to a C implementation routine
1237 * and returns, taking five arguments in addition to the standard ones.
1238 *
1239 * @param a_fFlags IEM_CIMPL_F_XXX.
1240 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
1241 * in the native recompiler.
1242 * @param a_pfnCImpl The pointer to the C routine.
1243 * @param a0 The first extra argument.
1244 * @param a1 The second extra argument.
1245 * @param a2 The third extra argument.
1246 * @param a3 The fourth extra argument.
1247 * @param a4 The fifth extra argument.
1248 */
1249#ifdef IEM_CIMPL_NEEDS_INSTR_LEN
1250# define IEM_MC_CALL_CIMPL_5(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3, a4) \
1251 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4))
1252#else
1253# define IEM_MC_CALL_CIMPL_5(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3, a4) \
1254 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, a0, a1, a2, a3, a4))
1255#endif
1256
1257/**
1258 * Defers the entire instruction emulation to a C implementation routine and
1259 * returns, only taking the standard parameters.
1260 *
1261 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
1262 *
1263 * @param a_fFlags IEM_CIMPL_F_XXX.
1264 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
1265 * in the native recompiler.
1266 * @param a_pfnCImpl The pointer to the C routine.
1267 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
1268 */
1269#ifdef IEM_CIMPL_NEEDS_INSTR_LEN
1270# define IEM_MC_DEFER_TO_CIMPL_0_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl) \
1271 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu)))
1272#else
1273# define IEM_MC_DEFER_TO_CIMPL_0_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl) \
1274 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu))
1275#endif
1276
1277/**
1278 * Defers the entire instruction emulation to a C implementation routine and
1279 * returns, taking one argument in addition to the standard ones.
1280 *
1281 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
1282 *
1283 * @param a_fFlags IEM_CIMPL_F_XXX.
1284 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
1285 * in the native recompiler.
1286 * @param a_pfnCImpl The pointer to the C routine.
1287 * @param a0 The argument.
1288 */
1289#ifdef IEM_CIMPL_NEEDS_INSTR_LEN
1290# define IEM_MC_DEFER_TO_CIMPL_1_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
1291 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0))
1292#else
1293# define IEM_MC_DEFER_TO_CIMPL_1_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
1294 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, a0))
1295#endif
1296
1297/**
1298 * Defers the entire instruction emulation to a C implementation routine and
1299 * returns, taking two arguments in addition to the standard ones.
1300 *
1301 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
1302 *
1303 * @param a_fFlags IEM_CIMPL_F_XXX.
1304 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
1305 * in the native recompiler.
1306 * @param a_pfnCImpl The pointer to the C routine.
1307 * @param a0 The first extra argument.
1308 * @param a1 The second extra argument.
1309 */
1310#ifdef IEM_CIMPL_NEEDS_INSTR_LEN
1311# define IEM_MC_DEFER_TO_CIMPL_2_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
1312 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1))
1313#else
1314# define IEM_MC_DEFER_TO_CIMPL_2_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
1315 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, a0, a1))
1316#endif
1317
1318/**
1319 * Defers the entire instruction emulation to a C implementation routine and
1320 * returns, taking three arguments in addition to the standard ones.
1321 *
1322 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
1323 *
1324 * @param a_fFlags IEM_CIMPL_F_XXX.
1325 * @param a_fGstShwFlush Guest shadow register copies needing to be flushed
1326 * in the native recompiler.
1327 * @param a_pfnCImpl The pointer to the C routine.
1328 * @param a0 The first extra argument.
1329 * @param a1 The second extra argument.
1330 * @param a2 The third extra argument.
1331 */
1332#ifdef IEM_CIMPL_NEEDS_INSTR_LEN
1333# define IEM_MC_DEFER_TO_CIMPL_3_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
1334 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2))
1335#else
1336# define IEM_MC_DEFER_TO_CIMPL_3_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
1337 IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, a0, a1, a2))
1338#endif
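
/**
 * Example (illustrative sketch only; iemCImpl_myop is hypothetical): unlike
 * the IEM_MC_CALL_CIMPL_XXX macros above, this is used directly in the
 * decoder function with no IEM_MC_BEGIN/IEM_MC_END block around it.
 * @code
 *      IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_MODE, 0, iemCImpl_myop, bImm);
 * @endcode
 */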
1339
1340
1341
1342/**
1343 * Calls an MMX assembly implementation taking two visible arguments.
1344 *
1345 * @param a_pfnAImpl Pointer to the assembly MMX routine.
1346 * @param a0 The first extra argument.
1347 * @param a1 The second extra argument.
1348 */
1349#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
1350 do { \
1351 IEM_MC_PREPARE_FPU_USAGE(); \
1352 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
1353 } while (0)
1354
1355/**
1356 * Calls an MMX assembly implementation taking three visible arguments.
1357 *
1358 * @param a_pfnAImpl Pointer to the assembly MMX routine.
1359 * @param a0 The first extra argument.
1360 * @param a1 The second extra argument.
1361 * @param a2 The third extra argument.
1362 */
1363#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
1364 do { \
1365 IEM_MC_PREPARE_FPU_USAGE(); \
1366 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
1367 } while (0)
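
/**
 * Example (illustrative sketch only; iemAImpl_myop_u64 is a hypothetical MMX
 * helper): the pointer to the guest x87/MMX state is passed implicitly as
 * the zeroth argument.
 * @code
 *      IEM_MC_CALL_MMX_AIMPL_2(iemAImpl_myop_u64, pu64Dst, pu64Src);
 * @endcode
 */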
1368
1369
1370/**
1371 * Calls an SSE assembly implementation taking two visible arguments.
1372 *
1373 * @param a_pfnAImpl Pointer to the assembly SSE routine.
1374 * @param a0 The first extra argument.
1375 * @param a1 The second extra argument.
1376 *
1377 * @note This throws an \#XF/\#UD exception if the helper indicates an exception
1378 * which is unmasked in the guest's MXCSR.
1379 */
1380#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
1381 do { \
1382 IEM_MC_PREPARE_SSE_USAGE(); \
1383 const uint32_t fMxcsrOld = pVCpu->cpum.GstCtx.XState.x87.MXCSR; \
1384 const uint32_t fMxcsrNew = a_pfnAImpl(fMxcsrOld & ~X86_MXCSR_XCPT_FLAGS, \
1385 (a0), (a1)); \
1386 pVCpu->cpum.GstCtx.XState.x87.MXCSR |= fMxcsrNew; \
1387 if (RT_LIKELY(( ~((fMxcsrOld & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
1388 & (fMxcsrNew & X86_MXCSR_XCPT_FLAGS)) == 0)) \
1389 { /* probable */ } \
1390 else \
1391 { \
1392 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT) \
1393 return iemRaiseSimdFpException(pVCpu); \
1394 return iemRaiseUndefinedOpcode(pVCpu); \
1395 } \
1396 } while (0)
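
/**
 * Note on the unmasked-exception test above: the exception mask bits
 * (X86_MXCSR_XCPT_MASK, MXCSR bits 7..12) sit X86_MXCSR_XCPT_MASK_SHIFT (7)
 * bits above the corresponding exception flag bits (X86_MXCSR_XCPT_FLAGS,
 * bits 0..5).  Shifting the masks down and inverting them yields a bitmap of
 * the exceptions the guest has unmasked, so the AND with the newly returned
 * flags is non-zero exactly when the helper raised an unmasked exception.
 * Worked example: with only \#Z unmasked, fMxcsrOld & X86_MXCSR_XCPT_MASK is
 * 0x1d80, which shifts down to 0x3b; ~0x3b selects flag bit 2 (ZE), so a
 * helper returning ZE (0x04) trips the test while a masked PE (0x20) does not.
 */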
1397
1398/**
1399 * Calls an SSE assembly implementation taking three visible arguments.
1400 *
1401 * @param a_pfnAImpl Pointer to the assembly SSE routine.
1402 * @param a0 The first extra argument.
1403 * @param a1 The second extra argument.
1404 * @param a2 The third extra argument.
1405 *
1406 * @note This throws an \#XF/\#UD exception if the helper indicates an exception
1407 * which is unmasked in the guest's MXCSR.
1408 */
1409#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
1410 do { \
1411 IEM_MC_PREPARE_SSE_USAGE(); \
1412 const uint32_t fMxcsrOld = pVCpu->cpum.GstCtx.XState.x87.MXCSR; \
1413 const uint32_t fMxcsrNew = a_pfnAImpl(fMxcsrOld & ~X86_MXCSR_XCPT_FLAGS, \
1414 (a0), (a1), (a2)); \
1415 pVCpu->cpum.GstCtx.XState.x87.MXCSR |= fMxcsrNew; \
1416 if (RT_LIKELY(( ~((fMxcsrOld & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
1417 & (fMxcsrNew & X86_MXCSR_XCPT_FLAGS)) == 0)) \
1418 { /* probable */ } \
1419 else \
1420 { \
1421 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT) \
1422 return iemRaiseSimdFpException(pVCpu); \
1423 return iemRaiseUndefinedOpcode(pVCpu); \
1424 } \
1425 } while (0)
1426
1427
1428/**
1429 * Calls an AVX assembly implementation taking two visible arguments.
1430 *
1431 * There is one implicit zeroth argument: the guest MXCSR value, with the exception flags cleared.
1432 *
1433 * @param a_pfnAImpl Pointer to the assembly AVX routine.
1434 * @param a0 The first extra argument.
1435 * @param a1 The second extra argument.
1436 *
1437 * @note This throws an \#XF/\#UD exception if the helper indicates an exception
1438 * which is unmasked in the guest's MXCSR.
1439 */
1440#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a0, a1) \
1441 do { \
1442 IEM_MC_PREPARE_AVX_USAGE(); \
1443 const uint32_t fMxcsrOld = pVCpu->cpum.GstCtx.XState.x87.MXCSR; \
1444 const uint32_t fMxcsrNew = a_pfnAImpl(fMxcsrOld & ~X86_MXCSR_XCPT_FLAGS, \
1445 (a0), (a1)); \
1446 pVCpu->cpum.GstCtx.XState.x87.MXCSR |= fMxcsrNew; \
1447 if (RT_LIKELY(( ~((fMxcsrOld & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
1448 & (fMxcsrNew & X86_MXCSR_XCPT_FLAGS)) == 0)) \
1449 { /* probable */ } \
1450 else \
1451 { \
1452 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT) \
1453 return iemRaiseSimdFpException(pVCpu); \
1454 return iemRaiseUndefinedOpcode(pVCpu); \
1455 } \
1456 } while (0)
1457
1458/**
1459 * Calls an AVX assembly implementation taking three visible arguments.
1460 *
1461 * There is one implicit zeroth argument: the guest MXCSR value, with the exception flags cleared.
1462 *
1463 * @param a_pfnAImpl Pointer to the assembly AVX routine.
1464 * @param a0 The first extra argument.
1465 * @param a1 The second extra argument.
1466 * @param a2 The third extra argument.
1467 *
1468 * @note This throws an \#XF/\#UD exception if the helper indicates an exception
1469 * which is unmasked in the guest's MXCSR.
1470 */
1471#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
1472 do { \
1473 IEM_MC_PREPARE_AVX_USAGE(); \
1474 const uint32_t fMxcsrOld = pVCpu->cpum.GstCtx.XState.x87.MXCSR; \
1475 const uint32_t fMxcsrNew = a_pfnAImpl(fMxcsrOld & ~X86_MXCSR_XCPT_FLAGS, \
1476 (a0), (a1), (a2)); \
1477 pVCpu->cpum.GstCtx.XState.x87.MXCSR |= fMxcsrNew; \
1478 if (RT_LIKELY(( ~((fMxcsrOld & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT) \
1479 & (fMxcsrNew & X86_MXCSR_XCPT_FLAGS)) == 0)) \
1480 { /* probable */ } \
1481 else \
1482 { \
1483 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT) \
1484 return iemRaiseSimdFpException(pVCpu); \
1485 return iemRaiseUndefinedOpcode(pVCpu); \
1486 } \
1487 } while (0)
1488
1489/*
1490 * x86: EFL == RFLAGS/EFLAGS.
1491 * arm: EFL == NZCV.
1492 */
1493
1494/** @note x86: Not for IOPL or IF testing. */
1495#define IEM_MC_IF_FLAGS_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
1496/** @note x86: Not for IOPL or IF testing. */
1497#define IEM_MC_IF_FLAGS_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
1498/** @note x86: Not for IOPL or IF testing. */
1499#define IEM_MC_IF_FLAGS_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
1500/** @note x86: Not for IOPL or IF testing. */
1501#define IEM_MC_IF_FLAGS_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
1502/** @note x86: Not for IOPL or IF testing. */
1503#define IEM_MC_IF_FLAGS_BITS_NE(a_fBit1, a_fBit2) \
1504 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
1505 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
1506/** @note x86: Not for IOPL or IF testing. */
1507#define IEM_MC_IF_FLAGS_BITS_EQ(a_fBit1, a_fBit2) \
1508 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
1509 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
1510/** @note x86: Not for IOPL or IF testing. */
1511#define IEM_MC_IF_FLAGS_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
1512 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
1513 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
1514 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
1515/** @note x86: Not for IOPL or IF testing. */
1516#define IEM_MC_IF_FLAGS_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
1517 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
1518 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
1519 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
1520
1521#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
1522#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
1523
1524#define IEM_MC_ELSE() } else {
1525#define IEM_MC_ENDIF() } do {} while (0)
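
/**
 * Example (illustrative sketch only; i8Imm stands for a previously decoded
 * operand): a conditional-branch style body built from the flag tests above.
 * @code
 *      IEM_MC_IF_FLAGS_BIT_SET(X86_EFL_ZF) {
 *          IEM_MC_REL_JMP_S8_AND_FINISH(i8Imm);
 *      } IEM_MC_ELSE() {
 *          IEM_MC_ADVANCE_PC_AND_FINISH();
 *      } IEM_MC_ENDIF();
 * @endcode
 */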
1526
1527
1528/** Recompiler debugging: Flush guest register shadow copies. */
1529#define IEM_MC_HINT_FLUSH_GUEST_SHADOW(a_fGstShwFlush) ((void)0)
1530
1531/** Recompiler liveness info: input GPR */
1532#define IEM_MC_LIVENESS_GREG_INPUT(a_iGReg) ((void)0)
1533/** Recompiler liveness info: clobbered GPR */
1534#define IEM_MC_LIVENESS_GREG_CLOBBER(a_iGReg) ((void)0)
1535/** Recompiler liveness info: modified GPR register (i.e. input & output) */
1536#define IEM_MC_LIVENESS_GREG_MODIFY(a_iGReg) ((void)0)
1537
1538/** Recompiler liveness info: input MM register */
1539#define IEM_MC_LIVENESS_MREG_INPUT(a_iMReg) ((void)0)
1540/** Recompiler liveness info: clobbered MM register */
1541#define IEM_MC_LIVENESS_MREG_CLOBBER(a_iMReg) ((void)0)
1542/** Recompiler liveness info: modified MM register (i.e. input & output) */
1543#define IEM_MC_LIVENESS_MREG_MODIFY(a_iMReg) ((void)0)
1544
1545/** Recompiler liveness info: input SSE register */
1546#define IEM_MC_LIVENESS_XREG_INPUT(a_iXReg) ((void)0)
1547/** Recompiler liveness info: clobbered SSE register */
1548#define IEM_MC_LIVENESS_XREG_CLOBBER(a_iXReg) ((void)0)
1549/** Recompiler liveness info: modified SSE register (i.e. input & output) */
1550#define IEM_MC_LIVENESS_XREG_MODIFY(a_iXReg) ((void)0)
1551
1552/** Recompiler liveness info: input MXCSR */
1553#define IEM_MC_LIVENESS_MXCSR_INPUT() ((void)0)
1554/** Recompiler liveness info: clobbered MXCSR */
1555#define IEM_MC_LIVENESS_MXCSR_CLOBBER() ((void)0)
1556/** Recompiler liveness info: modified MXCSR (i.e. input & output) */
1557#define IEM_MC_LIVENESS_MXCSR_MODIFY() ((void)0)
1558
1559
1560/** @} */
1561
1562/*
1563 * Include the target specific header.
1564 */
1565#ifdef VBOX_VMM_TARGET_X86
1566# include "VMMAll/target-x86/IEMMc-x86.h"
1567#elif defined(VBOX_VMM_TARGET_ARMV8)
1568//# include "VMMAll/target-armv8/IEMMc-armv8.h"
1569#else
1570# error "port me"
1571#endif
1572
1573#endif /* !VMM_INCLUDED_SRC_include_IEMMc_h */
1574