
source: vbox/trunk/src/VBox/VMM/VMMAll/IEMThreadedFunctions.cpp@98969

Last change on this file was r98969, checked in by vboxsync on 2023-03-15

VMM/IEM: More work on processing MC blocks, mainly related to reworking common functions for binary operations into body macros. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 17.0 KB
/* $Id: IEMThreadedFunctions.cpp 98969 2023-03-15 00:24:47Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Threaded Functions.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#ifndef LOG_GROUP /* defined when included by tstIEMCheckMc.cpp */
# define LOG_GROUP LOG_GROUP_IEM
#endif
#define VMCPU_INCL_CPUM_GST_CTX
#define IEM_WITH_OPAQUE_DECODER_STATE
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"
#include "IEMMc.h"

#include "IEMThreadedFunctions.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
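
/*
 * Each IEM_MC_* macro below is a *_THREADED variant of the corresponding
 * IEMMc.h macro, taking decode-time values (such as the instruction length)
 * as explicit parameters instead of fetching them from the decoder state.
 * The plain name is #undef'd right after each definition, presumably so the
 * function bodies included at the bottom of this file cannot accidentally
 * use the non-threaded form.  Illustrative expansion only:
 *
 *      IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED(2)
 *      // becomes: return iemRegAddToRipAndFinishingClearingRF(pVCpu, 2)
 */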
/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param. */
#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED(a_cbInstr) \
    return iemRegAddToRipAndFinishingClearingRF(pVCpu, a_cbInstr)
#undef IEM_MC_ADVANCE_RIP_AND_FINISH

/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length as param. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED(a_i8, a_cbInstr) \
    return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), pVCpu->iem.s.enmEffOpSize)
#undef IEM_MC_REL_JMP_S8_AND_FINISH

/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as param. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED(a_i16, a_cbInstr) \
    return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16))
#undef IEM_MC_REL_JMP_S16_AND_FINISH

/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as param. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED(a_i32, a_cbInstr) \
    return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), pVCpu->iem.s.enmEffOpSize)
#undef IEM_MC_REL_JMP_S32_AND_FINISH
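
/*
 * The S8 and S32 variants above pass pVCpu->iem.s.enmEffOpSize along,
 * presumably because a disp8 jump exists for every operand size and a disp32
 * jump serves both 32-bit and 64-bit operand sizes, so the helper needs to
 * know how to truncate the resulting IP; the S16 form is inherently 16-bit.
 */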

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
# define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR16(a_GCPtrEff, a_bRm, a_u16Disp) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr16(pVCpu, a_bRm, a_u16Disp)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
# define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32(a_GCPtrEff, a_bRm, a_bSib, a_u32Disp) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr32(pVCpu, a_bRm, a_bSib, a_u32Disp)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
# define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32FLAT(a_GCPtrEff, a_bRm, a_bSib, a_u32Disp) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr32(pVCpu, a_bRm, a_bSib, a_u32Disp)
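
/*
 * Note that the FLAT variant maps to the same 32-bit helper; the separate
 * name presumably lets the threaded recompiler bind flat-mode code to a
 * cheaper specialised variant later without changing call sites.
 */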

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
# define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR64(a_GCPtrEff, a_bRmEx, a_bSib, a_u32Disp, a_cbImm) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_bSib, a_u32Disp, a_cbImm)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
# define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR6432(a_GCPtrEff, a_bRmEx, a_bSib, a_u32Disp, a_cbImm) \
    (a_GCPtrEff) = (uint32_t)iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_bSib, a_u32Disp, a_cbImm)

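/*
 * The ADDR6432 variant handles the 0x67 address-size override in 64-bit
 * mode: the address is computed with the full 64-bit machinery and the cast
 * truncates it to 32 bits, matching the CPU's zero-extension of a 32-bit
 * effective address in long mode.
 */
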
/**
 * Calculates the effective address of a ModR/M memory operand, 16-bit
 * addressing variant.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR16.
 *
 * @returns The effective address.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bRm         The ModRM byte.
 * @param   u16Disp     The displacement byte/word, if any.
 */
static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr16(PVMCPUCC pVCpu, uint8_t bRm, uint16_t u16Disp) RT_NOEXCEPT
{
    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: bRm=%#x\n", bRm));
    Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);

    /* Handle the disp16 form with no registers first. */
    if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
    {
        Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: EffAddr=%#010RGv\n", (RTGCPTR)u16Disp));
        return u16Disp;
    }

    /* Get the displacement. */
    /** @todo we can eliminate this step by making u16Disp have this value
     *        already! */
    uint16_t u16EffAddr;
    switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    {
        case 0:  u16EffAddr = 0;                        break;
        case 1:  u16EffAddr = (int16_t)(int8_t)u16Disp; break;
        case 2:  u16EffAddr = u16Disp;                  break;
        default: AssertFailedStmt(u16EffAddr = 0);
    }

    /* Add the base and index registers to the disp. */
    switch (bRm & X86_MODRM_RM_MASK)
    {
        case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
        case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
        case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; break;
        case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; break;
        case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
        case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
        case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; break;
        case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
    }

    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: EffAddr=%#010RGv\n", (RTGCPTR)u16EffAddr));
    return u16EffAddr;
}
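
/*
 * Illustrative decode for the 16-bit helper (values invented for this note):
 * bRm=0x62 has mod=01, rm=010, giving BP + SI + (int16_t)(int8_t)u16Disp,
 * while bRm=0x06 (mod=00, rm=110) is the registerless form returning the
 * disp16 directly.
 */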


/**
 * Calculates the effective address of a ModR/M memory operand, 32-bit
 * addressing variant.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32 and
 * IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32FLAT.
 *
 * @returns The effective address.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bRm         The ModRM byte.
 * @param   bSib        The SIB byte, if any.
 * @param   u32Disp     The displacement byte/dword, if any.
 */
static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr32(PVMCPUCC pVCpu, uint8_t bRm, uint8_t bSib, uint32_t u32Disp) RT_NOEXCEPT
{
    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: bRm=%#x\n", bRm));
    Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);

    /* Handle the disp32 form with no registers first. */
    if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    {
        Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: EffAddr=%#010RGv\n", (RTGCPTR)u32Disp));
        return u32Disp;
    }

    /* Get the register (or SIB) value. */
    uint32_t u32EffAddr;
    switch (bRm & X86_MODRM_RM_MASK)
    {
        case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
        case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
        case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
        case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
        case 4: /* SIB */
        {
            /* Get the index and scale it. */
            switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
            {
                case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
                case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
                case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
                case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
                case 4: u32EffAddr = 0; /* none */           break;
                case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
                case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
                case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
            }
            u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

            /* add base */
            switch (bSib & X86_SIB_BASE_MASK)
            {
                case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
                case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
                case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
                case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
                case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; break;
                case 5:
                    if ((bRm & X86_MODRM_MOD_MASK) != 0)
                        u32EffAddr += pVCpu->cpum.GstCtx.ebp;
                    else
                        u32EffAddr += u32Disp;
                    break;
                case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
                case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
            }
            break;
        }
        case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
        case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
        case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
    }

    /* Get and add the displacement. */
    switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    {
        case 0: break;
        case 1: u32EffAddr += (int8_t)u32Disp; break;
        case 2: u32EffAddr += u32Disp; break;
        default: AssertFailed();
    }

    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: EffAddr=%#010RGv\n", (RTGCPTR)u32EffAddr));
    return u32EffAddr;
}
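
/*
 * Illustrative SIB decode (values invented for this note): bRm=0x44 (mod=01,
 * rm=100) means a SIB byte follows; with bSib=0x88 (scale=2, index=001,
 * base=000) the result is EAX + ECX*4 + (int8_t)u32Disp.
 */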


/**
 * Calculates the effective address of a ModR/M memory operand.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR64.
 *
 * @returns The effective address.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bRmEx       The ModRM byte but with bit 3 set to REX.B and
 *                      bit 4 to REX.X.  The two bits are part of the
 *                      REG sub-field, which isn't needed in this
 *                      function.
 * @param   bSib        The SIB byte, if any.
 * @param   u32Disp     The displacement byte/word/dword, if any.
 * @param   cbInstr     The size of the fully decoded instruction.  Used
 *                      for RIP relative addressing.
 * @todo combine cbInstr and cbImm!
 */
static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr64(PVMCPUCC pVCpu, uint8_t bRmEx, uint8_t bSib,
                                                   uint32_t u32Disp, uint8_t cbInstr) RT_NOEXCEPT
{
    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr64: bRmEx=%#x\n", bRmEx));
    Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);

    uint64_t u64EffAddr;

    /* Handle the rip+disp32 form with no registers first. */
    if ((bRmEx & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    {
        u64EffAddr  = (int32_t)u32Disp;
        u64EffAddr += pVCpu->cpum.GstCtx.rip + cbInstr;
    }
    else
    {
        /* Get the register (or SIB) value. */
        switch (bRmEx & (X86_MODRM_RM_MASK | 0x8)) /* bRmEx[bit 3] = REX.B */
        {
            case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
            case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
            case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
            case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
            case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
            case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
            case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
            case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
            case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
            case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
            case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
            case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
            case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
            case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
            /* SIB */
            case 4:
            case 12:
            {
                /* Get the index and scale it. */
                switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | ((bRmEx & 0x10) >> 1)) /* bRmEx[bit 4] = REX.X */
                {
                    case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
                    case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
                    case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
                    case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
                    case  4: u64EffAddr = 0; /* none */           break;
                    case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
                    case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
                    case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
                    case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
                    case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
                    case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
                    case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
                    case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
                    case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
                    case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
                    case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
                }
                u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                /* add base */
                switch ((bSib & X86_SIB_BASE_MASK) | (bRmEx & 0x8)) /* bRmEx[bit 3] = REX.B */
                {
                    case  0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
                    case  1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
                    case  2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
                    case  3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
                    case  4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; break;
                    case  6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
                    case  7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
                    case  8: u64EffAddr += pVCpu->cpum.GstCtx.r8;  break;
                    case  9: u64EffAddr += pVCpu->cpum.GstCtx.r9;  break;
                    case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
                    case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
                    case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
                    case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
                    case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
                    /* complicated encodings */
                    case 5:
                        if ((bRmEx & X86_MODRM_MOD_MASK) != 0)
                            u64EffAddr += pVCpu->cpum.GstCtx.rbp;
                        else
                            u64EffAddr += (int32_t)u32Disp;
                        break;
                    case 13:
                        if ((bRmEx & X86_MODRM_MOD_MASK) != 0)
                            u64EffAddr += pVCpu->cpum.GstCtx.r13;
                        else
                            u64EffAddr += (int32_t)u32Disp;
                        break;
                }
                break;
            }
        }

        /* Get and add the displacement. */
        switch ((bRmEx >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
        {
            case 0: break;
            case 1: u64EffAddr += (int8_t)u32Disp; break;
            case 2: u64EffAddr += (int32_t)u32Disp; break;
            default: AssertFailed();
        }
    }

    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr64: EffAddr=%#010RGv\n", u64EffAddr));
    return u64EffAddr;
}
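
/*
 * Illustrative decode (values invented for this note): bRmEx=0x05 (mod=00,
 * rm=101, REX.B/X clear) selects the RIP-relative form, yielding
 * rip + cbInstr + (int32_t)u32Disp; the decoded length is added because
 * RIP-relative operands are relative to the next instruction.
 */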



/*
 * The threaded functions.
 */
#include "IEMThreadedFunctions.cpp.h"
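
/*
 * IEMThreadedFunctions.cpp.h is not checked in next to this file; it appears
 * to be generated during the build from the IEM instruction decoder sources,
 * with each IEM_MC block expanded into a threaded function built on the
 * parameterised macro variants defined above.
 */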