VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllOpHlp-x86.cpp

Last change on this file was 108278, checked in by vboxsync, 2 months ago

VMM/IEM: Removed the #ifndef IEM_WITH_SETJMP code. We've had IEM_WITH_SETJMP defined unconditionally since 7.0 and the code probably doesn't even compile w/o it, so best remove the unused code. jiraref:VBP-1531

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 15.0 KB
/* $Id: IEMAllOpHlp-x86.cpp 108278 2025-02-18 15:46:53Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - x86 target, opcode decoding helpers.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM
#define VMCPU_INCL_CPUM_GST_CTX
#ifdef IN_RING0
# define VBOX_VMM_TARGET_X86
#endif
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/dbgf.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInlineDecode-x86.h"



/** @name Opcode Helpers.
 * @{
 */

/**
 * Calculates the effective address of a ModR/M memory operand.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
 *
 * May longjmp on internal error.
 *
 * @return  The effective address.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   bRm                 The ModRM byte.
 * @param   cbImmAndRspOffset   - First byte: The size of any immediate
 *                                following the effective address opcode bytes
 *                                (only for RIP relative addressing).
 *                              - Second byte: RSP displacement (for POP [ESP]).
 */
RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
{
    Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
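/* SET_SS_DEF: default the effective segment to SS for BP/SP based addressing modes, unless a segment override prefix is already in effect. */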
#define SET_SS_DEF() \
    do \
    { \
        if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
            pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
    } while (0)

    if (!IEM_IS_64BIT_CODE(pVCpu))
    {
/** @todo Check the effective address size crap! */
        if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
        {
            uint16_t u16EffAddr;

            /* Handle the disp16 form with no registers first. */
            if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
                IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
            else
            {
                /* Get the displacement. */
                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
                {
                    case 0: u16EffAddr = 0; break;
                    case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
                    case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
                    default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
                }

                /* Add the base and index registers to the disp. */
                switch (bRm & X86_MODRM_RM_MASK)
                {
                    case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
                    case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
                    case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
                    case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
                    case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
                    case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
                    case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
                    case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
                }
            }

            Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
            return u16EffAddr;
        }

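        /* 32-bit effective addressing (the 16-bit case was handled and returned above). */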
        Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
        uint32_t u32EffAddr;

        /* Handle the disp32 form with no registers first. */
        if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
            IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
        else
        {
            /* Get the register (or SIB) value. */
            switch ((bRm & X86_MODRM_RM_MASK))
            {
                case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
                case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
                case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
                case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
                case 4: /* SIB */
                {
                    uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);

                    /* Get the index and scale it. */
                    switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
                    {
                        case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
                        case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
                        case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
                        case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
                        case 4: u32EffAddr = 0; /*none */ break;
                        case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
                        case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
                        case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
                        IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
                    }
                    u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                    /* add base */
                    switch (bSib & X86_SIB_BASE_MASK)
                    {
                        case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
                        case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
                        case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
                        case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
                        case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
                        case 5:
                            if ((bRm & X86_MODRM_MOD_MASK) != 0)
                            {
                                u32EffAddr += pVCpu->cpum.GstCtx.ebp;
                                SET_SS_DEF();
                            }
                            else
                            {
                                uint32_t u32Disp;
                                IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                                u32EffAddr += u32Disp;
                            }
                            break;
                        case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
                        case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
                        IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
                    }
                    break;
                }
                case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
                case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
                case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
            }

            /* Get and add the displacement. */
            switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
            {
                case 0:
                    break;
                case 1:
                {
                    int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
                    u32EffAddr += i8Disp;
                    break;
                }
                case 2:
                {
                    uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                    u32EffAddr += u32Disp;
                    break;
                }
                default:
                    AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
            }
        }

        Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
        Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
        return u32EffAddr;
    }

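    /*
     * 64-bit code: the effective address is computed in 64 bits below and, when
     * the address size is 32-bit, masked down to 32 bits at the end.  Note that
     * RIP relative addressing is relative to the end of the instruction, which
     * is why the instruction length and the size of any trailing immediate
     * (low byte of cbImmAndRspOffset) are added to RIP.
     */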
    uint64_t u64EffAddr;

    /* Handle the rip+disp32 form with no registers first. */
    if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    {
        IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
        u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
    }
    else
    {
        /* Get the register (or SIB) value. */
        switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
        {
            case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
            case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
            case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
            case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
            case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
            case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
            case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
            case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
            case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
            case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
            case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
            case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
            case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
            case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
            /* SIB */
            case 4:
            case 12:
            {
                uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);

                /* Get the index and scale it. */
                switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
                {
                    case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
                    case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
                    case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
                    case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
                    case 4: u64EffAddr = 0; /*none */ break;
                    case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
                    case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
                    case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
                    case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
                    case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
                    case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
                    case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
                    case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
                    case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
                    case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
                    case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
                    IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
                }
                u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                /* add base */
                switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
                {
                    case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
                    case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
                    case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
                    case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
                    case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
                    case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
                    case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
                    case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
                    case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
                    case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
                    case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
                    case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
                    case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
                    case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
                    /* complicated encodings */
                    case 5:
                    case 13:
                        if ((bRm & X86_MODRM_MOD_MASK) != 0)
                        {
                            if (!pVCpu->iem.s.uRexB)
                            {
                                u64EffAddr += pVCpu->cpum.GstCtx.rbp;
                                SET_SS_DEF();
                            }
                            else
                                u64EffAddr += pVCpu->cpum.GstCtx.r13;
                        }
                        else
                        {
                            uint32_t u32Disp;
                            IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                            u64EffAddr += (int32_t)u32Disp;
                        }
                        break;
                    IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
                }
                break;
            }
            IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
        }

        /* Get and add the displacement. */
        switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
        {
            case 0:
                break;
            case 1:
            {
                int8_t i8Disp;
                IEM_OPCODE_GET_NEXT_S8(&i8Disp);
                u64EffAddr += i8Disp;
                break;
            }
            case 2:
            {
                uint32_t u32Disp;
                IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                u64EffAddr += (int32_t)u32Disp;
                break;
            }
            IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
        }

    }

    if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
    {
        Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
        return u64EffAddr;
    }
    Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
    Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
    return u64EffAddr & UINT32_MAX;
}
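
/*
 * Illustrative sketch (editorial, not part of the original source): how the
 * cbImmAndRspOffset argument described above might be packed by a caller.  The
 * locals cbImm and cbRspOffset are hypothetical; in the real decoder the value
 * is supplied through the IEM_MC_CALC_RM_EFF_ADDR machinery.
 *
 *     // Byte 0: size of a trailing immediate (RIP relative addressing only).
 *     // Byte 1: extra RSP displacement, e.g. for POP [ESP].
 *     uint32_t const cbImmAndRspOffset = (uint32_t)cbImm | ((uint32_t)cbRspOffset << 8);
 *     RTGCPTR  const GCPtrEff          = iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, cbImmAndRspOffset);
 */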

/** @} */
