source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncs.cpp@101189

Last change on this file since 101189 was 100811, checked in by vboxsync, 17 months ago

VMM/IEM: Working on implementing the FLAT mode (64-bit mode and 32-bit FLAT) optimizations. Introduced a special 64-bit FS+GS(+CS) variant so we can deal with it the same way as the flat 32-bit variant; this means lumping CS-prefixed stuff (unlikely) in with FS and GS. We call the FLAT variant for DS, ES, and SS accesses and the other mode for memory accesses via FS, GS and CS. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 27.9 KB
/* $Id: IEMAllThrdFuncs.cpp 100811 2023-08-06 01:54:38Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Threaded Functions.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#ifndef LOG_GROUP /* defined when included by tstIEMCheckMc.cpp */
# define LOG_GROUP LOG_GROUP_IEM
#endif
#define VMCPU_INCL_CPUM_GST_CTX
#define IEM_WITH_OPAQUE_DECODER_STATE
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"
#include "IEMMc.h"

#include "IEMThreadedFunctions.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/

/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
 *  and only used when we're in 16-bit code on a pre-386 CPU. */
#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16(a_cbInstr) \
    return iemRegAddToIp16AndFinishingClearingRF(pVCpu, a_cbInstr)

/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
 *  and used for 16-bit and 32-bit code on 386 and later CPUs. */
#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32(a_cbInstr) \
    return iemRegAddToEip32AndFinishingClearingRF(pVCpu, a_cbInstr)

/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
 *  and only used when we're in 64-bit code. */
#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64(a_cbInstr) \
    return iemRegAddToRip64AndFinishingClearingRF(pVCpu, a_cbInstr)

#undef IEM_MC_ADVANCE_RIP_AND_FINISH
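
/* Illustrative note (assumed expansion, for exposition only): the generated
 * threaded function bodies use the *_THREADED_PC16/PC32/PC64 variants above in
 * place of the plain IEM_MC_ADVANCE_RIP_AND_FINISH, with the instruction length
 * determined at decode time passed in as a parameter, e.g. roughly
 *      IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64(3);
 * so advancing RIP does not need to consult the decoder state at execution time. */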


/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length as extra
 *  parameter, for use in 16-bit code on a pre-386 CPU. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16(a_i8, a_cbInstr) \
    return iemRegIp16RelativeJumpS8AndFinishClearingRF(pVCpu, a_cbInstr, (a_i8))

/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
 *  size as extra parameters, for use in 16-bit and 32-bit code on 386 and
 *  later CPUs. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32(a_i8, a_cbInstr, a_enmEffOpSize) \
    return iemRegEip32RelativeJumpS8AndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize)

/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
 *  size as extra parameters, for use in 64-bit code. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64(a_i8, a_cbInstr, a_enmEffOpSize) \
    return iemRegRip64RelativeJumpS8AndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize)

#undef IEM_MC_REL_JMP_S8_AND_FINISH


/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
 *  param, for use in 16-bit code on a pre-386 CPU. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16(a_i16, a_cbInstr) \
    return iemRegEip32RelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16))

/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
 *  param, for use in 16-bit and 32-bit code on 386 and later CPUs. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32(a_i16, a_cbInstr) \
    return iemRegEip32RelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16))

/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
 *  param, for use in 64-bit code. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr) \
    return iemRegRip64RelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16))

#undef IEM_MC_REL_JMP_S16_AND_FINISH


/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
 *  an extra parameter - dummy for pre-386 variations not eliminated by the
 *  python script. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16(a_i32, a_cbInstr) \
    do { RT_NOREF(pVCpu, a_i32, a_cbInstr); AssertFailedReturn(VERR_IEM_IPE_9); } while (0)

/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
 *  an extra parameter, for use in 16-bit and 32-bit code on 386+. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32(a_i32, a_cbInstr) \
    return iemRegEip32RelativeJumpS32AndFinishClearingRF(pVCpu, a_cbInstr, (a_i32))

/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
 *  an extra parameter, for use in 64-bit code. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64(a_i32, a_cbInstr) \
    return iemRegRip64RelativeJumpS32AndFinishClearingRF(pVCpu, a_cbInstr, (a_i32))

#undef IEM_MC_REL_JMP_S32_AND_FINISH


/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters, 16-bit. */
#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_16(a_GCPtrEff, a_bRm, a_u16Disp) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr16(pVCpu, a_bRm, a_u16Disp)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters, pre-386 16-bit. */
#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_16_PRE386(a_GCPtrEff, a_bRm, a_u16Disp) \
    IEM_MC_CALC_RM_EFF_ADDR_THREADED_16(a_GCPtrEff, a_bRm, a_u16Disp)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters, 32-bit with address prefix. */
#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_32_ADDR16(a_GCPtrEff, a_bRm, a_u16Disp) \
    IEM_MC_CALC_RM_EFF_ADDR_THREADED_16(a_GCPtrEff, a_bRm, a_u16Disp)


/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters, 32-bit. */
#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_32(a_GCPtrEff, a_bRm, a_uSibAndRspOffset, a_u32Disp) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr32(pVCpu, a_bRm, a_uSibAndRspOffset, a_u32Disp)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters, 32-bit flat. */
#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_32_FLAT(a_GCPtrEff, a_bRm, a_uSibAndRspOffset, a_u32Disp) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr32(pVCpu, a_bRm, a_uSibAndRspOffset, a_u32Disp)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters, 16-bit with address prefix. */
#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_16_ADDR32(a_GCPtrEff, a_bRm, a_uSibAndRspOffset, a_u32Disp) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr32(pVCpu, a_bRm, a_uSibAndRspOffset, a_u32Disp)


/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_64(a_GCPtrEff, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS(a_GCPtrEff, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters.
 * @todo How did that address prefix thing work for 64-bit code again? */
#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32(a_GCPtrEff, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm) \
    (a_GCPtrEff) = (uint32_t)iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm)

#undef IEM_MC_CALC_RM_EFF_ADDR
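
/* Note on the variants above (based on the r100811 change log quoted in the page
 * header): the _32_FLAT and _64_FSGS bodies are currently identical to the plain
 * _32 and _64 ones; the separate names exist so the generator can emit distinct
 * threaded functions for flat DS/ES/SS accesses versus accesses through FS, GS
 * or a CS prefix. */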


/** Variant of IEM_MC_CALL_CIMPL_1 with explicit instruction length parameter. */
#define IEM_MC_CALL_CIMPL_1_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0)
#undef IEM_MC_CALL_CIMPL_1

/** Variant of IEM_MC_CALL_CIMPL_2 with explicit instruction length parameter. */
#define IEM_MC_CALL_CIMPL_2_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0, a1) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1)
#undef IEM_MC_CALL_CIMPL_2

/** Variant of IEM_MC_CALL_CIMPL_3 with explicit instruction length parameter. */
#define IEM_MC_CALL_CIMPL_3_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0, a1, a2) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2)
#undef IEM_MC_CALL_CIMPL_3

/** Variant of IEM_MC_CALL_CIMPL_4 with explicit instruction length parameter. */
#define IEM_MC_CALL_CIMPL_4_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0, a1, a2, a3) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2, a3)
#undef IEM_MC_CALL_CIMPL_4

/** Variant of IEM_MC_CALL_CIMPL_5 with explicit instruction length parameter. */
#define IEM_MC_CALL_CIMPL_5_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0, a1, a2, a3, a4) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2, a3, a4)
#undef IEM_MC_CALL_CIMPL_5


/** Variant of IEM_MC_DEFER_TO_CIMPL_0_RET with explicit instruction
 *  length parameter. */
#define IEM_MC_DEFER_TO_CIMPL_0_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr))
#undef IEM_MC_DEFER_TO_CIMPL_0_RET

/** Variant of IEM_MC_DEFER_TO_CIMPL_1_RET with explicit instruction
 *  length parameter. */
#define IEM_MC_DEFER_TO_CIMPL_1_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0)
#undef IEM_MC_DEFER_TO_CIMPL_1_RET

/** Variant of IEM_MC_DEFER_TO_CIMPL_2_RET with explicit instruction
 *  length parameter. */
#define IEM_MC_DEFER_TO_CIMPL_2_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0, a1) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1)
#undef IEM_MC_DEFER_TO_CIMPL_2_RET

/** Variant of IEM_MC_DEFER_TO_CIMPL_3 with explicit instruction length
 *  parameter. */
#define IEM_MC_DEFER_TO_CIMPL_3_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0, a1, a2) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2)
#undef IEM_MC_DEFER_TO_CIMPL_3_RET

/** Variant of IEM_MC_DEFER_TO_CIMPL_4 with explicit instruction length
 *  parameter. */
#define IEM_MC_DEFER_TO_CIMPL_4_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0, a1, a2, a3) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2, a3)
#undef IEM_MC_DEFER_TO_CIMPL_4_RET

/** Variant of IEM_MC_DEFER_TO_CIMPL_5 with explicit instruction length
 *  parameter. */
#define IEM_MC_DEFER_TO_CIMPL_5_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0, a1, a2, a3, a4) \
    return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2, a3, a4)
#undef IEM_MC_DEFER_TO_CIMPL_5_RET


/** Variant of IEM_MC_FETCH_GREG_U8 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_THREADED(a_u8Dst, a_iGRegEx) \
    (a_u8Dst) = iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))
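
/* Note (assumption, sketching the iemGRegFetchU8Ex/iemGRegRefU8Ex convention):
 * the extended register index has 20 values, where 0..15 would select the low
 * byte of the 16 general registers (AL, CL, ..., R15B) and 16..19 the legacy
 * high-byte registers (AH, CH, DH, BH), e.g.
 *      IEM_MC_FETCH_GREG_U8_THREADED(u8Value, 17);  // illustrative: would fetch CH
 */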

/** Variant of IEM_MC_FETCH_GREG_U8_ZX_U16 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_ZX_U16_THREADED(a_u16Dst, a_iGRegEx) \
    (a_u16Dst) = iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))

/** Variant of IEM_MC_FETCH_GREG_U8_ZX_U32 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_ZX_U32_THREADED(a_u32Dst, a_iGRegEx) \
    (a_u32Dst) = iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))

/** Variant of IEM_MC_FETCH_GREG_U8_ZX_U64 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_ZX_U64_THREADED(a_u64Dst, a_iGRegEx) \
    (a_u64Dst) = iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))

/** Variant of IEM_MC_FETCH_GREG_U8_SX_U16 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_SX_U16_THREADED(a_u16Dst, a_iGRegEx) \
    (a_u16Dst) = (int8_t)iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))

/** Variant of IEM_MC_FETCH_GREG_U8_SX_U32 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_SX_U32_THREADED(a_u32Dst, a_iGRegEx) \
    (a_u32Dst) = (int8_t)iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))
#undef IEM_MC_FETCH_GREG_U8_SX_U32

/** Variant of IEM_MC_FETCH_GREG_U8_SX_U64 with extended (20) register index. */
#define IEM_MC_FETCH_GREG_U8_SX_U64_THREADED(a_u64Dst, a_iGRegEx) \
    (a_u64Dst) = (int8_t)iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))
#undef IEM_MC_FETCH_GREG_U8_SX_U64

/** Variant of IEM_MC_STORE_GREG_U8 with extended (20) register index. */
#define IEM_MC_STORE_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
    *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) = (a_u8Value)
#undef IEM_MC_STORE_GREG_U8

/** Variant of IEM_MC_STORE_GREG_U8_CONST with extended (20) register index. */
#define IEM_MC_STORE_GREG_U8_CONST_THREADED(a_iGRegEx, a_u8Value) \
    *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) = (a_u8Value)
#undef IEM_MC_STORE_GREG_U8_CONST

/** Variant of IEM_MC_REF_GREG_U8 with extended (20) register index. */
#define IEM_MC_REF_GREG_U8_THREADED(a_pu8Dst, a_iGRegEx) \
    (a_pu8Dst) = iemGRegRefU8Ex(pVCpu, (a_iGRegEx))
#undef IEM_MC_REF_GREG_U8

/** Variant of IEM_MC_ADD_GREG_U8 with extended (20) register index. */
#define IEM_MC_ADD_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
    *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) += (a_u8Value)
#undef IEM_MC_ADD_GREG_U8

/** Variant of IEM_MC_SUB_GREG_U8 with extended (20) register index. */
#define IEM_MC_SUB_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
    *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) -= (a_u8Value)
#undef IEM_MC_SUB_GREG_U8

/** Variant of IEM_MC_ADD_GREG_U8_TO_LOCAL with extended (20) register index. */
#define IEM_MC_ADD_GREG_U8_TO_LOCAL_THREADED(a_u8Value, a_iGRegEx) \
    do { (a_u8Value) += iemGRegFetchU8Ex(pVCpu, (a_iGRegEx)); } while (0)
#undef IEM_MC_ADD_GREG_U8_TO_LOCAL

/** Variant of IEM_MC_AND_GREG_U8 with extended (20) register index. */
#define IEM_MC_AND_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
    *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) &= (a_u8Value)
#undef IEM_MC_AND_GREG_U8

/** Variant of IEM_MC_OR_GREG_U8 with extended (20) register index. */
#define IEM_MC_OR_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
    *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) |= (a_u8Value)
#undef IEM_MC_OR_GREG_U8

/**
 * Calculates the effective address of a ModR/M memory operand, 16-bit
 * addressing variant.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_16 and its variants.
 *
 * @returns The effective address.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bRm         The ModRM byte.
 * @param   u16Disp     The displacement byte/word, if any.
 */
static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr16(PVMCPUCC pVCpu, uint8_t bRm, uint16_t u16Disp) RT_NOEXCEPT
{
    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: bRm=%#x u16Disp=%#x\n", bRm, u16Disp));
    Assert(!IEM_IS_64BIT_CODE(pVCpu));

    /* Handle the disp16 form with no registers first. */
    if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
    {
        Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: EffAddr=%#010RGv\n", (RTGCPTR)u16Disp));
        return u16Disp;
    }

    /* Get the displacement. */
    /** @todo we can eliminate this step by making u16Disp have this value
     *        already! */
    uint16_t u16EffAddr;
    switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    {
        case 0:  u16EffAddr = 0;                        break;
        case 1:  u16EffAddr = (int16_t)(int8_t)u16Disp; break;
        case 2:  u16EffAddr = u16Disp;                  break;
        default: AssertFailedStmt(u16EffAddr = 0);
    }

    /* Add the base and index registers to the disp. */
    switch (bRm & X86_MODRM_RM_MASK)
    {
        case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
        case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
        case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; break;
        case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; break;
        case 4: u16EffAddr += pVCpu->cpum.GstCtx.si;                         break;
        case 5: u16EffAddr += pVCpu->cpum.GstCtx.di;                         break;
        case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp;                         break;
        case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx;                         break;
    }

    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: EffAddr=%#010RGv\n", (RTGCPTR)u16EffAddr));
    return u16EffAddr;
}
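
/* Worked example (illustrative values, not from the original source):
 *      bRm = 0x42 (mod=1, rm=2), u16Disp = 0x0010
 *          -> u16EffAddr = (int8_t)0x10 + BP + SI
 *      bRm = 0x06 (mod=0, rm=6), u16Disp = 0x1234
 *          -> special disp16-only form: EffAddr = 0x1234, no registers added.
 */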


/**
 * Calculates the effective address of a ModR/M memory operand, 32-bit
 * addressing variant.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_32 and
 * IEM_MC_CALC_RM_EFF_ADDR_THREADED_32_FLAT.
 *
 * @returns The effective address.
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling thread.
 * @param   bRm                 The ModRM byte.
 * @param   uSibAndRspOffset    Two parts:
 *                                - The first 8 bits make up the SIB byte.
 *                                - The next 8 bits are the fixed RSP/ESP offset
 *                                  in case of a pop [xSP].
 * @param   u32Disp             The displacement byte/dword, if any.
 */
static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr32(PVMCPUCC pVCpu, uint8_t bRm, uint32_t uSibAndRspOffset,
                                                   uint32_t u32Disp) RT_NOEXCEPT
{
    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: bRm=%#x uSibAndRspOffset=%#x u32Disp=%#x\n", bRm, uSibAndRspOffset, u32Disp));

    /* Handle the disp32 form with no registers first. */
    if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    {
        Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: EffAddr=%#010RGv\n", (RTGCPTR)u32Disp));
        return u32Disp;
    }

    /* Get the register (or SIB) value. */
    uint32_t u32EffAddr;
#ifdef _MSC_VER
    u32EffAddr = 0; /* MSC uninitialized variable analysis is too simple, it seems. */
#endif
    switch (bRm & X86_MODRM_RM_MASK)
    {
        case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
        case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
        case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
        case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
        case 4: /* SIB */
        {
            /* Get the index and scale it. */
            switch ((uSibAndRspOffset >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
            {
                case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
                case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
                case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
                case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
                case 4: u32EffAddr = 0; /* none */           break;
                case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
                case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
                case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
            }
            u32EffAddr <<= (uSibAndRspOffset >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

            /* add base */
            switch (uSibAndRspOffset & X86_SIB_BASE_MASK)
            {
                case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
                case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
                case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
                case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
                case 4:
                    u32EffAddr += pVCpu->cpum.GstCtx.esp;
                    u32EffAddr += uSibAndRspOffset >> 8;
                    break;
                case 5:
                    if ((bRm & X86_MODRM_MOD_MASK) != 0)
                        u32EffAddr += pVCpu->cpum.GstCtx.ebp;
                    else
                        u32EffAddr += u32Disp;
                    break;
                case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
                case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
            }
            break;
        }
        case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
        case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
        case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
    }

    /* Get and add the displacement. */
    switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    {
        case 0: break;
        case 1: u32EffAddr += (int8_t)u32Disp; break;
        case 2: u32EffAddr += u32Disp; break;
        default: AssertFailed();
    }

    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: EffAddr=%#010RGv\n", (RTGCPTR)u32EffAddr));
    return u32EffAddr;
}
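
/* Worked example (illustrative values, not from the original source):
 *      bRm = 0x44 (mod=1, rm=4 -> SIB follows), SIB byte = 0x88
 *      (scale field=2, index=1/ECX, base=0/EAX), u32Disp = 0x20,
 *      uSibAndRspOffset = 0x88 (no extra RSP/ESP adjustment in bits 8..15)
 *          -> u32EffAddr = EAX + (ECX << 2) + (int8_t)0x20
 *      When the instruction is a pop with an xSP-relative operand, the decoder
 *      passes the fixed RSP/ESP adjustment in bits 8..15 of uSibAndRspOffset;
 *      the "u32EffAddr += uSibAndRspOffset >> 8" line above adds it in.
 */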


/**
 * Calculates the effective address of a ModR/M memory operand.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_64 and its variants.
 *
 * @returns The effective address.
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling thread.
 * @param   bRmEx               The ModRM byte but with bit 3 set to REX.B and
 *                              bit 4 to REX.X.  The two bits are part of the
 *                              REG sub-field, which isn't needed in this
 *                              function.
 * @param   uSibAndRspOffset    Two parts:
 *                                - The first 8 bits make up the SIB byte.
 *                                - The next 8 bits are the fixed RSP/ESP offset
 *                                  in case of a pop [xSP].
 * @param   u32Disp             The displacement byte/word/dword, if any.
 * @param   cbInstr             The size of the fully decoded instruction.  Used
 *                              for RIP relative addressing.
 * @todo combine cbInstr and cbImm!
 */
static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr64(PVMCPUCC pVCpu, uint8_t bRmEx, uint32_t uSibAndRspOffset,
                                                   uint32_t u32Disp, uint8_t cbInstr) RT_NOEXCEPT
{
    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr64: bRmEx=%#x\n", bRmEx));
    Assert(IEM_IS_64BIT_CODE(pVCpu));

    uint64_t u64EffAddr;

    /* Handle the rip+disp32 form with no registers first. */
    if ((bRmEx & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    {
        u64EffAddr = (int32_t)u32Disp;
        u64EffAddr += pVCpu->cpum.GstCtx.rip + cbInstr;
    }
    else
    {
        /* Get the register (or SIB) value. */
#ifdef _MSC_VER
        u64EffAddr = 0; /* MSC uninitialized variable analysis is too simple, it seems. */
#endif
        switch (bRmEx & (X86_MODRM_RM_MASK | 0x8)) /* bRmEx[bit 3] = REX.B */
        {
            default:
            case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
            case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
            case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
            case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
            case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
            case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
            case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
            case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
            case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
            case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
            case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
            case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
            case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
            case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
            /* SIB */
            case 4:
            case 12:
            {
                /* Get the index and scale it. */
                switch (  ((uSibAndRspOffset >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
                        | ((bRmEx & 0x10) >> 1)) /* bRmEx[bit 4] = REX.X */
                {
                    case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
                    case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
                    case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
                    case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
                    case  4: u64EffAddr = 0; /* none */           break;
                    case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
                    case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
                    case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
                    case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
                    case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
                    case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
                    case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
                    case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
                    case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
                    case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
                    case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
                }
                u64EffAddr <<= (uSibAndRspOffset >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                /* add base */
                switch ((uSibAndRspOffset & X86_SIB_BASE_MASK) | (bRmEx & 0x8)) /* bRmEx[bit 3] = REX.B */
                {
                    case  0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
                    case  1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
                    case  2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
                    case  3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
                    case  4:
                        u64EffAddr += pVCpu->cpum.GstCtx.rsp;
                        u64EffAddr += uSibAndRspOffset >> 8;
                        break;
                    case  6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
                    case  7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
                    case  8: u64EffAddr += pVCpu->cpum.GstCtx.r8;  break;
                    case  9: u64EffAddr += pVCpu->cpum.GstCtx.r9;  break;
                    case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
                    case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
                    case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
                    case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
                    case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
                    /* complicated encodings */
                    case 5:
                        if ((bRmEx & X86_MODRM_MOD_MASK) != 0)
                            u64EffAddr += pVCpu->cpum.GstCtx.rbp;
                        else
                            u64EffAddr += (int32_t)u32Disp;
                        break;
                    case 13:
                        if ((bRmEx & X86_MODRM_MOD_MASK) != 0)
                            u64EffAddr += pVCpu->cpum.GstCtx.r13;
                        else
                            u64EffAddr += (int32_t)u32Disp;
                        break;
                }
                break;
            }
        }

        /* Get and add the displacement. */
        switch ((bRmEx >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
        {
            case 0: break;
            case 1: u64EffAddr += (int8_t)u32Disp; break;
            case 2: u64EffAddr += (int32_t)u32Disp; break;
            default: AssertFailed();
        }
    }

    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr64: EffAddr=%#010RGv\n", u64EffAddr));
    return u64EffAddr;
}
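
/* Worked example (illustrative values, not from the original source):
 *      bRmEx = 0x05 (mod=0, rm=5), u32Disp = 0x100, cbInstr = 7
 *          -> RIP-relative form: u64EffAddr = RIP + 7 + (int32_t)0x100
 *      bRmEx = 0x0c (mod=0, rm=4, bit 3 = REX.B set), SIB base field = 0
 *          -> SIB case; the base switch sees 0 | 8 = 8, so the base register is R8.
 *      Folding REX.B/REX.X into bRmEx is what lets the switch statements above
 *      cover register indexes 0..15 directly.
 */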


/*
 * The threaded functions.
 */
#include "IEMThreadedFunctions.cpp.h"
