VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInline.h@104988

Last change on this file since 104988 was 104988, checked in by vboxsync, 5 months ago

VMM/IEM: Use the IEM_F_X86_AC flag where we can. bugref:10687

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 165.7 KB
1/* $Id: IEMInline.h 104988 2024-06-20 20:42:07Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Inlined Functions.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMInline_h
29#define VMM_INCLUDED_SRC_include_IEMInline_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34
35
36/**
37 * Makes status code adjustments (pass up from I/O and access handler)
38 * as well as maintaining statistics.
39 *
40 * @returns Strict VBox status code to pass up.
41 * @param pVCpu The cross context virtual CPU structure of the calling thread.
42 * @param rcStrict The status from executing an instruction.
43 */
44DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
45{
46 if (rcStrict != VINF_SUCCESS)
47 {
48 /* Deal with the cases that should be treated as VINF_SUCCESS first. */
49 if ( rcStrict == VINF_IEM_YIELD_PENDING_FF
50#ifdef VBOX_WITH_NESTED_HWVIRT_VMX /** @todo r=bird: Why do we need TWO status codes here? */
51 || rcStrict == VINF_VMX_VMEXIT
52#endif
53#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
54 || rcStrict == VINF_SVM_VMEXIT
55#endif
56 )
57 {
58 rcStrict = pVCpu->iem.s.rcPassUp;
59 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
60 { /* likely */ }
61 else
62 pVCpu->iem.s.cRetPassUpStatus++;
63 }
64 else if (RT_SUCCESS(rcStrict))
65 {
66 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
67 || rcStrict == VINF_IOM_R3_IOPORT_READ
68 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
69 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
70 || rcStrict == VINF_IOM_R3_MMIO_READ
71 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
72 || rcStrict == VINF_IOM_R3_MMIO_WRITE
73 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
74 || rcStrict == VINF_CPUM_R3_MSR_READ
75 || rcStrict == VINF_CPUM_R3_MSR_WRITE
76 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
77 || rcStrict == VINF_EM_RAW_TO_R3
78 || rcStrict == VINF_EM_TRIPLE_FAULT
79 || rcStrict == VINF_EM_EMULATE_SPLIT_LOCK
80 || rcStrict == VINF_GIM_R3_HYPERCALL
81 /* raw-mode / virt handlers only: */
82 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
83 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
84 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
85 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
86 || rcStrict == VINF_SELM_SYNC_GDT
87 || rcStrict == VINF_CSAM_PENDING_ACTION
88 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
89 /* nested hw.virt codes: */
90 || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
91 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
92 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
93/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
94 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
95 if (rcPassUp == VINF_SUCCESS)
96 pVCpu->iem.s.cRetInfStatuses++;
97 else if ( rcPassUp < VINF_EM_FIRST
98 || rcPassUp > VINF_EM_LAST
99 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
100 {
101 LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
102 pVCpu->iem.s.cRetPassUpStatus++;
103 rcStrict = rcPassUp;
104 }
105 else
106 {
107 LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
108 pVCpu->iem.s.cRetInfStatuses++;
109 }
110 }
111 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
112 pVCpu->iem.s.cRetAspectNotImplemented++;
113 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
114 pVCpu->iem.s.cRetInstrNotImplemented++;
115 else
116 pVCpu->iem.s.cRetErrStatuses++;
117 }
118 else
119 {
120 rcStrict = pVCpu->iem.s.rcPassUp;
121 if (rcStrict != VINF_SUCCESS)
122 pVCpu->iem.s.cRetPassUpStatus++;
123 }
124
125 /* Just clear it here as well. */
126 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
127
128 return rcStrict;
129}
130
131
132/**
133 * Sets the pass up status.
134 *
135 * @returns VINF_SUCCESS.
136 * @param pVCpu The cross context virtual CPU structure of the
137 * calling thread.
138 * @param rcPassUp The pass up status. Must be informational.
139 * VINF_SUCCESS is not allowed.
140 */
141DECLINLINE(int) iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp) RT_NOEXCEPT
142{
143 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
144
145 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
146 if (rcOldPassUp == VINF_SUCCESS)
147 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
148 /* If both are EM scheduling codes, use EM priority rules. */
149 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
150 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
151 {
152 if (rcPassUp < rcOldPassUp)
153 {
154 LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
155 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
156 }
157 else
158 LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
159 }
160 /* Override EM scheduling with specific status code. */
161 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
162 {
163 LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
164 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
165 }
166 /* Don't override specific status code, first come first served. */
167 else
168 LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
169 return VINF_SUCCESS;
170}
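
/*
 * Illustrative note: the pass-up priority rules above can be exercised on
 * their own.  The sketch below is not part of IEM; it uses plain integers in
 * place of VBOXSTRICTRC, and the EM range bounds and demo codes are assumed
 * stand-ins, not the real VINF_* values.
 */
#if 0 /* standalone demo only */
#include <stdint.h>
#include <stdio.h>

#define DEMO_VINF_SUCCESS   0
#define DEMO_VINF_EM_FIRST  1100   /* assumed stand-in for VINF_EM_FIRST */
#define DEMO_VINF_EM_LAST   1199   /* assumed stand-in for VINF_EM_LAST */

/* Mirrors iemSetPassUpStatus: among EM scheduling codes the numerically lower
   one wins; a specific (non-EM) informational code overrides an EM code; an
   already recorded specific code is kept (first come, first served). */
static int32_t demoMergePassUp(int32_t rcOldPassUp, int32_t rcNewPassUp)
{
    int const fOldIsEm = rcOldPassUp >= DEMO_VINF_EM_FIRST && rcOldPassUp <= DEMO_VINF_EM_LAST;
    int const fNewIsEm = rcNewPassUp >= DEMO_VINF_EM_FIRST && rcNewPassUp <= DEMO_VINF_EM_LAST;
    if (rcOldPassUp == DEMO_VINF_SUCCESS)
        return rcNewPassUp;                 /* nothing recorded yet */
    if (fOldIsEm && fNewIsEm)
        return rcNewPassUp < rcOldPassUp ? rcNewPassUp : rcOldPassUp;
    if (fOldIsEm)
        return rcNewPassUp;                 /* specific code beats EM scheduling code */
    return rcOldPassUp;                     /* keep the first specific code */
}

int main(void)
{
    printf("%d\n", (int)demoMergePassUp(1150, 1120)); /* 1120: higher priority EM code */
    printf("%d\n", (int)demoMergePassUp(1150, 2300)); /* 2300: specific code wins */
    printf("%d\n", (int)demoMergePassUp(2300, 1120)); /* 2300: first specific code kept */
    return 0;
}
#endif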
171
172
173/**
174 * Calculates the IEM_F_X86_AC flags.
175 *
176 * @returns IEM_F_X86_AC or zero
177 * @param pVCpu The cross context virtual CPU structure of the
178 * calling thread.
179 */
180DECL_FORCE_INLINE(uint32_t) iemCalcExecAcFlag(PVMCPUCC pVCpu) RT_NOEXCEPT
181{
182 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
183 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
184
185 if ( !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
186 || (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_AM | X86_CR0_PE)) != (X86_CR0_AM | X86_CR0_PE)
187 || ( !pVCpu->cpum.GstCtx.eflags.Bits.u1VM
188 && pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl != 3))
189 return 0;
190 return IEM_F_X86_AC;
191}
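
/*
 * Illustrative note: the condition evaluated above, reduced to plain booleans.
 * The sketch below is not part of IEM; the parameter names are made up and
 * stand in for the guest context fields.
 */
#if 0 /* standalone demo only */
#include <stdio.h>

/* Alignment checks (#AC) are only armed when EFLAGS.AC and CR0.AM are set, the
   CPU is in protected mode (CR0.PE), and code runs at CPL 3 -- either ring-3
   proper or virtual-8086 mode, which always executes at CPL 3. */
static int demoAcArmed(int fEflAc, int fCr0Am, int fCr0Pe, int fV86, unsigned uCpl)
{
    return fEflAc && fCr0Am && fCr0Pe && (fV86 || uCpl == 3);
}

int main(void)
{
    printf("%d %d %d\n",
           demoAcArmed(1, 1, 1, 0, 3),   /* 1: ring-3 with AC+AM */
           demoAcArmed(1, 1, 1, 0, 0),   /* 0: ring-0, AC is ignored */
           demoAcArmed(1, 0, 1, 0, 3));  /* 0: CR0.AM clear */
    return 0;
}
#endif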
192
193
194/**
195 * Calculates the IEM_F_MODE_X86_32BIT_FLAT flag.
196 *
197 * Checks if CS, SS, DS and ES are all wide open flat 32-bit segments. This will
198 * reject expand down data segments and conforming code segments.
199 *
200 * ASSUMES that the CPU is in 32-bit mode.
201 *
202 * @note Will return zero if any of the segment register state is marked
203 * external, this must be factored into assertions checking fExec
204 * consistency.
205 *
206 * @returns IEM_F_MODE_X86_32BIT_FLAT or zero.
207 * @param pVCpu The cross context virtual CPU structure of the
208 * calling thread.
209 * @sa iemCalc32BitFlatIndicatorEsDs
210 */
211DECL_FORCE_INLINE(uint32_t) iemCalc32BitFlatIndicator(PVMCPUCC pVCpu) RT_NOEXCEPT
212{
213 AssertCompile(X86_SEL_TYPE_DOWN == X86_SEL_TYPE_CONF);
214 return ( ( pVCpu->cpum.GstCtx.es.Attr.u
215 | pVCpu->cpum.GstCtx.cs.Attr.u
216 | pVCpu->cpum.GstCtx.ss.Attr.u
217 | pVCpu->cpum.GstCtx.ds.Attr.u)
218 & (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86DESCATTR_UNUSABLE))
219 == (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P)
220 && ( (pVCpu->cpum.GstCtx.es.u32Limit + 1)
221 | (pVCpu->cpum.GstCtx.cs.u32Limit + 1)
222 | (pVCpu->cpum.GstCtx.ss.u32Limit + 1)
223 | (pVCpu->cpum.GstCtx.ds.u32Limit + 1))
224 == 0
225 && ( pVCpu->cpum.GstCtx.es.u64Base
226 | pVCpu->cpum.GstCtx.cs.u64Base
227 | pVCpu->cpum.GstCtx.ss.u64Base
228 | pVCpu->cpum.GstCtx.ds.u64Base)
229 == 0
230 && !(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS))
231 ? IEM_F_MODE_X86_32BIT_FLAT : 0;
232}
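
/*
 * Illustrative note: the OR-and-compare trick used above, shown on its own.
 * The sketch below is not part of IEM; the DEMOSEG type and the attribute bit
 * values are made up and do not match the real X86DESCATTR_* constants.
 */
#if 0 /* standalone demo only */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ATTR_WANTED    0x000000b0u  /* bits every flat segment has set (assumed) */
#define DEMO_ATTR_FORBIDDEN 0x00014000u  /* unusable/expand-down stand-ins (assumed) */

typedef struct { uint32_t fAttr; uint32_t cbLimit; uint64_t uBase; } DEMOSEG;

/* ORing per group means a single compare per group: a forbidden attribute bit,
   a limit other than 0xffffffff (limit + 1 wraps to zero), or a non-zero base
   in any of the segments makes the corresponding check fail. */
static int demoAllFlat(DEMOSEG const *paSegs, unsigned cSegs)
{
    uint32_t fAttrOr  = 0;
    uint32_t fLimitOr = 0;
    uint64_t uBaseOr  = 0;
    for (unsigned i = 0; i < cSegs; i++)
    {
        fAttrOr  |= paSegs[i].fAttr;
        fLimitOr |= paSegs[i].cbLimit + 1;
        uBaseOr  |= paSegs[i].uBase;
    }
    return (fAttrOr & (DEMO_ATTR_WANTED | DEMO_ATTR_FORBIDDEN)) == DEMO_ATTR_WANTED
        && fLimitOr == 0
        && uBaseOr  == 0;
}

int main(void)
{
    DEMOSEG const aFlat[2]  = { { DEMO_ATTR_WANTED, 0xffffffffu, 0 }, { DEMO_ATTR_WANTED, 0xffffffffu, 0 } };
    DEMOSEG const aMixed[2] = { { DEMO_ATTR_WANTED, 0xffffffffu, 0 }, { DEMO_ATTR_WANTED, 0x000fffffu, 0 } };
    printf("flat=%d mixed=%d\n", demoAllFlat(aFlat, 2), demoAllFlat(aMixed, 2)); /* flat=1 mixed=0 */
    return 0;
}
#endif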
233
234
235/**
236 * Calculates the IEM_F_MODE_X86_32BIT_FLAT flag, ASSUMING the CS and SS are
237 * flat already.
238 *
239 * This is used by sysenter.
240 *
241 * @note Will return zero if any of the segment register state is marked
242 * external, this must be factored into assertions checking fExec
243 * consistency.
244 *
245 * @returns IEM_F_MODE_X86_32BIT_FLAT or zero.
246 * @param pVCpu The cross context virtual CPU structure of the
247 * calling thread.
248 * @sa iemCalc32BitFlatIndicator
249 */
250DECL_FORCE_INLINE(uint32_t) iemCalc32BitFlatIndicatorEsDs(PVMCPUCC pVCpu) RT_NOEXCEPT
251{
252 AssertCompile(X86_SEL_TYPE_DOWN == X86_SEL_TYPE_CONF);
253 return ( ( pVCpu->cpum.GstCtx.es.Attr.u
254 | pVCpu->cpum.GstCtx.ds.Attr.u)
255 & (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86DESCATTR_UNUSABLE))
256 == (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P)
257 && ( (pVCpu->cpum.GstCtx.es.u32Limit + 1)
258 | (pVCpu->cpum.GstCtx.ds.u32Limit + 1))
259 == 0
260 && ( pVCpu->cpum.GstCtx.es.u64Base
261 | pVCpu->cpum.GstCtx.ds.u64Base)
262 == 0
263 && !(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS))
264 ? IEM_F_MODE_X86_32BIT_FLAT : 0;
265}
266
267
268/**
269 * Calculates the IEM_F_MODE_XXX, CPL and AC flags.
270 *
271 * @returns IEM_F_MODE_XXX, IEM_F_X86_CPL_MASK and IEM_F_X86_AC.
272 * @param pVCpu The cross context virtual CPU structure of the
273 * calling thread.
274 */
275DECL_FORCE_INLINE(uint32_t) iemCalcExecModeAndCplFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
276{
277 /*
278 * We duplicate code from CPUMGetGuestCPL and CPUMIsGuestIn64BitCodeEx
279 * here to try to get this done as efficiently as possible.
280 */
281 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
282
283 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
284 {
285 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
286 {
287 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
288 uint32_t fExec = ((uint32_t)pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl << IEM_F_X86_CPL_SHIFT);
289 if ( !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
290 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
291 || fExec != (3U << IEM_F_X86_CPL_SHIFT))
292 { /* likely */ }
293 else
294 fExec |= IEM_F_X86_AC;
295
296 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig)
297 {
298 Assert(!pVCpu->cpum.GstCtx.cs.Attr.n.u1Long || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA));
299 fExec |= IEM_F_MODE_X86_32BIT_PROT | iemCalc32BitFlatIndicator(pVCpu);
300 }
301 else if ( pVCpu->cpum.GstCtx.cs.Attr.n.u1Long
302 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA))
303 fExec |= IEM_F_MODE_X86_64BIT;
304 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
305 fExec |= IEM_F_MODE_X86_16BIT_PROT;
306 else
307 fExec |= IEM_F_MODE_X86_16BIT_PROT_PRE_386;
308 return fExec;
309 }
310 if ( !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
311 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM))
312 return IEM_F_MODE_X86_16BIT_PROT_V86 | (UINT32_C(3) << IEM_F_X86_CPL_SHIFT);
313 return IEM_F_MODE_X86_16BIT_PROT_V86 | (UINT32_C(3) << IEM_F_X86_CPL_SHIFT) | IEM_F_X86_AC;
314 }
315
316 /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
317 if (RT_LIKELY(!pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig))
318 {
319 if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
320 return IEM_F_MODE_X86_16BIT;
321 return IEM_F_MODE_X86_16BIT_PRE_386;
322 }
323
324 /* 32-bit unreal mode. */
325 return IEM_F_MODE_X86_32BIT | iemCalc32BitFlatIndicator(pVCpu);
326}
327
328
329/**
330 * Calculates the AMD-V and VT-x related context flags.
331 *
332 * @returns 0 or a combination of IEM_F_X86_CTX_IN_GUEST, IEM_F_X86_CTX_SVM and
333 * IEM_F_X86_CTX_VMX.
334 * @param pVCpu The cross context virtual CPU structure of the
335 * calling thread.
336 */
337DECL_FORCE_INLINE(uint32_t) iemCalcExecHwVirtFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
338{
339 /*
340 * This duplicates code from CPUMIsGuestVmxEnabled, CPUMIsGuestSvmEnabled
341 * and CPUMIsGuestInNestedHwvirtMode to some extent.
342 */
343 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
344
345 AssertCompile(X86_CR4_VMXE != MSR_K6_EFER_SVME);
346 uint64_t const fTmp = (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VMXE)
347 | (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SVME);
348 if (RT_LIKELY(!fTmp))
349 return 0; /* likely */
350
351 if (fTmp & X86_CR4_VMXE)
352 {
353 Assert(pVCpu->cpum.GstCtx.hwvirt.enmHwvirt == CPUMHWVIRT_VMX);
354 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode)
355 return IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST;
356 return IEM_F_X86_CTX_VMX;
357 }
358
359 Assert(pVCpu->cpum.GstCtx.hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
360 if (pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN)
361 return IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST;
362 return IEM_F_X86_CTX_SVM;
363}
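
/*
 * Illustrative note: the early-out above relies on CR4.VMXE and EFER.SVME
 * occupying different bit positions (see the AssertCompile), so the two
 * "is hardware virtualization enabled" bits can be tested with one OR and one
 * branch.  The sketch below is not part of IEM; the bit values are made up.
 */
#if 0 /* standalone demo only */
#include <stdint.h>
#include <stdio.h>

#define DEMO_CR4_VMXE   UINT64_C(0x2000)  /* assumed stand-in bit */
#define DEMO_EFER_SVME  UINT64_C(0x1000)  /* assumed stand-in bit, distinct from the CR4 one */

static unsigned demoHwVirt(uint64_t uCr4, uint64_t uEfer)
{
    uint64_t const fTmp = (uCr4 & DEMO_CR4_VMXE) | (uEfer & DEMO_EFER_SVME);
    if (!fTmp)
        return 0;                             /* the common case: neither enabled */
    return (fTmp & DEMO_CR4_VMXE) ? 1 /* VMX */ : 2 /* SVM */;
}

int main(void)
{
    printf("%u %u %u\n", demoHwVirt(0, 0), demoHwVirt(DEMO_CR4_VMXE, 0), demoHwVirt(0, DEMO_EFER_SVME));
    return 0;   /* prints: 0 1 2 */
}
#endif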
364
365#ifdef VBOX_INCLUDED_vmm_dbgf_h /* VM::dbgf.ro.cEnabledHwBreakpoints is only accessible if VBox/vmm/dbgf.h is included. */
366
367/**
368 * Calculates IEM_F_PENDING_BRK_XXX (IEM_F_PENDING_BRK_MASK) flags.
369 *
370 * @returns IEM_F_PENDING_BRK_XXX or zero.
371 * @param pVCpu The cross context virtual CPU structure of the
372 * calling thread.
373 */
374DECL_FORCE_INLINE(uint32_t) iemCalcExecDbgFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
375{
376 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
377
378 if (RT_LIKELY( !(pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)
379 && pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledHwBreakpoints == 0))
380 return 0;
381 return iemCalcExecDbgFlagsSlow(pVCpu);
382}
383
384/**
385 * Calculates the IEM_F_XXX flags.
386 *
387 * @returns IEM_F_XXX combination matching the current CPU state.
388 * @param pVCpu The cross context virtual CPU structure of the
389 * calling thread.
390 */
391DECL_FORCE_INLINE(uint32_t) iemCalcExecFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
392{
393 return iemCalcExecModeAndCplFlags(pVCpu)
394 | iemCalcExecHwVirtFlags(pVCpu)
395 /* SMM is not yet implemented */
396 | iemCalcExecDbgFlags(pVCpu)
397 ;
398}
399
400
401/**
402 * Re-calculates the MODE, CPL and AC parts of IEMCPU::fExec.
403 *
404 * @param pVCpu The cross context virtual CPU structure of the
405 * calling thread.
406 */
407DECL_FORCE_INLINE(void) iemRecalcExecModeAndCplAndAcFlags(PVMCPUCC pVCpu)
408{
409 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK | IEM_F_X86_AC))
410 | iemCalcExecModeAndCplFlags(pVCpu);
411}
412
413
414/**
415 * Re-calculates the IEM_F_PENDING_BRK_MASK part of IEMCPU::fExec.
416 *
417 * @param pVCpu The cross context virtual CPU structure of the
418 * calling thread.
419 */
420DECL_FORCE_INLINE(void) iemRecalcExecDbgFlags(PVMCPUCC pVCpu)
421{
422 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_PENDING_BRK_MASK)
423 | iemCalcExecDbgFlags(pVCpu);
424}
425
426#endif /* VBOX_INCLUDED_vmm_dbgf_h */
427
428
429#ifndef IEM_WITH_OPAQUE_DECODER_STATE
430
431# if defined(VBOX_INCLUDED_vmm_dbgf_h) || defined(DOXYGEN_RUNNING) /* dbgf.ro.cEnabledHwBreakpoints */
432
433/**
434 * Initializes the execution state.
435 *
436 * @param pVCpu The cross context virtual CPU structure of the
437 * calling thread.
438 * @param fExecOpts Optional execution flags:
439 * - IEM_F_BYPASS_HANDLERS
440 * - IEM_F_X86_DISREGARD_LOCK
441 *
442 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
443 * side-effects in strict builds.
444 */
445DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
446{
447 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
448 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
449 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
450 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
451 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
452 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
453 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
454 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
455 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
456 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
457
458 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
459 pVCpu->iem.s.fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
460 pVCpu->iem.s.cActiveMappings = 0;
461 pVCpu->iem.s.iNextMapping = 0;
462
463# ifdef VBOX_STRICT
464 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
465 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
466 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
467 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
468 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
469 pVCpu->iem.s.uRexReg = 127;
470 pVCpu->iem.s.uRexB = 127;
471 pVCpu->iem.s.offModRm = 127;
472 pVCpu->iem.s.uRexIndex = 127;
473 pVCpu->iem.s.iEffSeg = 127;
474 pVCpu->iem.s.idxPrefix = 127;
475 pVCpu->iem.s.uVex3rdReg = 127;
476 pVCpu->iem.s.uVexLength = 127;
477 pVCpu->iem.s.fEvexStuff = 127;
478 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
479# ifdef IEM_WITH_CODE_TLB
480 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
481 pVCpu->iem.s.pbInstrBuf = NULL;
482 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
483 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
484 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
485 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
486# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
487 pVCpu->iem.s.offOpcode = 127;
488# endif
489# else
490 pVCpu->iem.s.offOpcode = 127;
491 pVCpu->iem.s.cbOpcode = 127;
492# endif
493# endif /* VBOX_STRICT */
494}
495
496
497# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
498/**
499 * Performs a minimal reinitialization of the execution state.
500 *
501 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
502 * 'world-switch' type operations on the CPU. Currently only nested
503 * hardware-virtualization uses it.
504 *
505 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
506 * @param cbInstr The instruction length (for flushing).
507 */
508DECLINLINE(void) iemReInitExec(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
509{
510 pVCpu->iem.s.fExec = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
511 iemOpcodeFlushHeavy(pVCpu, cbInstr);
512}
513# endif
514
515# endif /* VBOX_INCLUDED_vmm_dbgf_h || DOXYGEN_RUNNING */
516
517/**
518 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
519 *
520 * @param pVCpu The cross context virtual CPU structure of the
521 * calling thread.
522 */
523DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu) RT_NOEXCEPT
524{
525 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
526# ifdef VBOX_STRICT
527# ifdef IEM_WITH_CODE_TLB
528 NOREF(pVCpu);
529# else
530 pVCpu->iem.s.cbOpcode = 0;
531# endif
532# else
533 NOREF(pVCpu);
534# endif
535}
536
537
538/**
539 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
540 *
541 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
542 *
543 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
545 * @param rcStrict The status code to fiddle.
546 */
547DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
548{
549 iemUninitExec(pVCpu);
550 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
551}
552
553
554/**
555 * Macro used by the IEMExec* method to check the given instruction length.
556 *
557 * Will return on failure!
558 *
559 * @param a_cbInstr The given instruction length.
560 * @param a_cbMin The minimum length.
561 */
562# define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
563 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
564 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
565
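/*
 * Illustrative note: the single-compare range check used by the macro above,
 * shown on its own.  Not part of IEM.
 */
#if 0 /* standalone demo only */
#include <stdio.h>

/* Non-zero iff cbMin <= cbInstr <= 15.  When cbInstr < cbMin the unsigned
   subtraction wraps around to a huge value and the comparison fails; when
   cbInstr > 15 the left-hand side exceeds the right-hand side.  One branchless
   comparison covers both bounds. */
static int demoInstrLenOk(unsigned cbInstr, unsigned cbMin)
{
    return cbInstr - cbMin <= 15u - cbMin;
}

int main(void)
{
    printf("%d %d %d %d\n",
           demoInstrLenOk(3, 1),    /* 1: in range */
           demoInstrLenOk(15, 1),   /* 1: upper bound */
           demoInstrLenOk(0, 1),    /* 0: below the minimum (wraps) */
           demoInstrLenOk(16, 1));  /* 0: longer than any x86 instruction */
    return 0;
}
#endif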
566
567# ifndef IEM_WITH_SETJMP
568
569/**
570 * Fetches the first opcode byte.
571 *
572 * @returns Strict VBox status code.
573 * @param pVCpu The cross context virtual CPU structure of the
574 * calling thread.
575 * @param pu8 Where to return the opcode byte.
576 */
577DECLINLINE(VBOXSTRICTRC) iemOpcodeGetFirstU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
578{
579 /*
580 * Check for hardware instruction breakpoints.
581 */
582 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_INSTR)))
583 { /* likely */ }
584 else
585 {
586 VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
587 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
588 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
589 { /* likely */ }
590 else
591 {
592 *pu8 = 0xff; /* shut up gcc. sigh */
593 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
594 return iemRaiseDebugException(pVCpu);
595 return rcStrict;
596 }
597 }
598
599 /*
600 * Fetch the first opcode byte.
601 */
602 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
603 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
604 {
605 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
606 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
607 return VINF_SUCCESS;
608 }
609 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
610}
611
612# else /* IEM_WITH_SETJMP */
613
614/**
615 * Fetches the first opcode byte, longjmp on error.
616 *
617 * @returns The opcode byte.
618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
619 */
620DECL_INLINE_THROW(uint8_t) iemOpcodeGetFirstU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
621{
622 /*
623 * Check for hardware instruction breakpoints.
624 */
625 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_INSTR)))
626 { /* likely */ }
627 else
628 {
629 VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
630 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
631 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
632 { /* likely */ }
633 else
634 {
635 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
636 rcStrict = iemRaiseDebugException(pVCpu);
637 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
638 }
639 }
640
641 /*
642 * Fetch the first opcode byte.
643 */
644# ifdef IEM_WITH_CODE_TLB
645 uint8_t bRet;
646 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
647 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
648 if (RT_LIKELY( pbBuf != NULL
649 && offBuf < pVCpu->iem.s.cbInstrBuf))
650 {
651 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
652 bRet = pbBuf[offBuf];
653 }
654 else
655 bRet = iemOpcodeGetNextU8SlowJmp(pVCpu);
656# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
657 Assert(pVCpu->iem.s.offOpcode == 0);
658 pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet;
659# endif
660 return bRet;
661
662# else /* !IEM_WITH_CODE_TLB */
663 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
664 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
665 {
666 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
667 return pVCpu->iem.s.abOpcode[offOpcode];
668 }
669 return iemOpcodeGetNextU8SlowJmp(pVCpu);
670# endif
671}
672
673# endif /* IEM_WITH_SETJMP */
674
675/**
676 * Fetches the first opcode byte, returns/throws automatically on failure.
677 *
678 * @param a_pu8 Where to return the opcode byte.
679 * @remark Implicitly references pVCpu.
680 */
681# ifndef IEM_WITH_SETJMP
682# define IEM_OPCODE_GET_FIRST_U8(a_pu8) \
683 do \
684 { \
685 VBOXSTRICTRC rcStrict2 = iemOpcodeGetFirstU8(pVCpu, (a_pu8)); \
686 if (rcStrict2 == VINF_SUCCESS) \
687 { /* likely */ } \
688 else \
689 return rcStrict2; \
690 } while (0)
691# else
692# define IEM_OPCODE_GET_FIRST_U8(a_pu8) (*(a_pu8) = iemOpcodeGetFirstU8Jmp(pVCpu))
693# endif /* IEM_WITH_SETJMP */
694
695
696# ifndef IEM_WITH_SETJMP
697
698/**
699 * Fetches the next opcode byte.
700 *
701 * @returns Strict VBox status code.
702 * @param pVCpu The cross context virtual CPU structure of the
703 * calling thread.
704 * @param pu8 Where to return the opcode byte.
705 */
706DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
707{
708 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
709 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
710 {
711 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
712 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
713 return VINF_SUCCESS;
714 }
715 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
716}
717
718# else /* IEM_WITH_SETJMP */
719
720/**
721 * Fetches the next opcode byte, longjmp on error.
722 *
723 * @returns The opcode byte.
724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
725 */
726DECL_INLINE_THROW(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
727{
728# ifdef IEM_WITH_CODE_TLB
729 uint8_t bRet;
730 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
731 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
732 if (RT_LIKELY( pbBuf != NULL
733 && offBuf < pVCpu->iem.s.cbInstrBuf))
734 {
735 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
736 bRet = pbBuf[offBuf];
737 }
738 else
739 bRet = iemOpcodeGetNextU8SlowJmp(pVCpu);
740# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
741 Assert(pVCpu->iem.s.offOpcode < sizeof(pVCpu->iem.s.abOpcode));
742 pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet;
743# endif
744 return bRet;
745
746# else /* !IEM_WITH_CODE_TLB */
747 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
748 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
749 {
750 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
751 return pVCpu->iem.s.abOpcode[offOpcode];
752 }
753 return iemOpcodeGetNextU8SlowJmp(pVCpu);
754# endif
755}
756
757# endif /* IEM_WITH_SETJMP */
758
759/**
760 * Fetches the next opcode byte, returns automatically on failure.
761 *
762 * @param a_pu8 Where to return the opcode byte.
763 * @remark Implicitly references pVCpu.
764 */
765# ifndef IEM_WITH_SETJMP
766# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
767 do \
768 { \
769 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
770 if (rcStrict2 == VINF_SUCCESS) \
771 { /* likely */ } \
772 else \
773 return rcStrict2; \
774 } while (0)
775# else
776# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
777# endif /* IEM_WITH_SETJMP */
778
779
780# ifndef IEM_WITH_SETJMP
781/**
782 * Fetches the next signed byte from the opcode stream.
783 *
784 * @returns Strict VBox status code.
785 * @param pVCpu The cross context virtual CPU structure of the calling thread.
786 * @param pi8 Where to return the signed byte.
787 */
788DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8) RT_NOEXCEPT
789{
790 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
791}
792# endif /* !IEM_WITH_SETJMP */
793
794
795/**
796 * Fetches the next signed byte from the opcode stream, returning automatically
797 * on failure.
798 *
799 * @param a_pi8 Where to return the signed byte.
800 * @remark Implicitly references pVCpu.
801 */
802# ifndef IEM_WITH_SETJMP
803# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
804 do \
805 { \
806 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
807 if (rcStrict2 != VINF_SUCCESS) \
808 return rcStrict2; \
809 } while (0)
810# else /* IEM_WITH_SETJMP */
811# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
812
813# endif /* IEM_WITH_SETJMP */
814
815
816# ifndef IEM_WITH_SETJMP
817/**
818 * Fetches the next signed byte from the opcode stream, extending it to
819 * unsigned 16-bit.
820 *
821 * @returns Strict VBox status code.
822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
823 * @param pu16 Where to return the unsigned word.
824 */
825DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
826{
827 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
828 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
829 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
830
831 *pu16 = (uint16_t)(int16_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
832 pVCpu->iem.s.offOpcode = offOpcode + 1;
833 return VINF_SUCCESS;
834}
835# endif /* !IEM_WITH_SETJMP */
836
837/**
838 * Fetches the next signed byte from the opcode stream and sign-extends it to
839 * a word, returning automatically on failure.
840 *
841 * @param a_pu16 Where to return the word.
842 * @remark Implicitly references pVCpu.
843 */
844# ifndef IEM_WITH_SETJMP
845# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
846 do \
847 { \
848 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
849 if (rcStrict2 != VINF_SUCCESS) \
850 return rcStrict2; \
851 } while (0)
852# else
853# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (uint16_t)(int16_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
854# endif
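
/*
 * Illustrative note: the chained casts used by the S8->U16/U32/U64 fetchers
 * above, demonstrated on plain values.  Not part of IEM.
 */
#if 0 /* standalone demo only */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t const bOpcode = 0xfb;                             /* -5 as a signed byte */
    /* The inner (int8_t) cast reinterprets the byte as signed, the middle cast
       sign-extends it to the target width, and the outer cast turns the result
       back into an unsigned value without changing the bit pattern. */
    uint16_t const u16 = (uint16_t)(int16_t)(int8_t)bOpcode;  /* 0xfffb */
    uint32_t const u32 = (uint32_t)(int32_t)(int8_t)bOpcode;  /* 0xfffffffb */
    uint64_t const u64 = (uint64_t)(int64_t)(int8_t)bOpcode;  /* 0xfffffffffffffffb */
    printf("%#x %#x %#llx\n", (unsigned)u16, (unsigned)u32, (unsigned long long)u64);
    return 0;
}
#endif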
855
856# ifndef IEM_WITH_SETJMP
857/**
858 * Fetches the next signed byte from the opcode stream, extending it to
859 * unsigned 32-bit.
860 *
861 * @returns Strict VBox status code.
862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
863 * @param pu32 Where to return the unsigned dword.
864 */
865DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
866{
867 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
868 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
869 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
870
871 *pu32 = (uint32_t)(int32_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
872 pVCpu->iem.s.offOpcode = offOpcode + 1;
873 return VINF_SUCCESS;
874}
875# endif /* !IEM_WITH_SETJMP */
876
877/**
878 * Fetches the next signed byte from the opcode stream and sign-extends it to
879 * a double word, returning automatically on failure.
880 *
881 * @param a_pu32 Where to return the double word.
882 * @remark Implicitly references pVCpu.
883 */
884# ifndef IEM_WITH_SETJMP
885# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
886 do \
887 { \
888 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
889 if (rcStrict2 != VINF_SUCCESS) \
890 return rcStrict2; \
891 } while (0)
892# else
893# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (uint32_t)(int32_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
894# endif
895
896
897# ifndef IEM_WITH_SETJMP
898/**
899 * Fetches the next signed byte from the opcode stream, extending it to
900 * unsigned 64-bit.
901 *
902 * @returns Strict VBox status code.
903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
904 * @param pu64 Where to return the unsigned qword.
905 */
906DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
907{
908 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
909 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
910 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
911
912 *pu64 = (uint64_t)(int64_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
913 pVCpu->iem.s.offOpcode = offOpcode + 1;
914 return VINF_SUCCESS;
915}
916# endif /* !IEM_WITH_SETJMP */
917
918/**
919 * Fetches the next signed byte from the opcode stream and sign-extends it to
920 * a quad word, returning automatically on failure.
921 *
922 * @param a_pu64 Where to return the quad word.
923 * @remark Implicitly references pVCpu.
924 */
925# ifndef IEM_WITH_SETJMP
926# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
927 do \
928 { \
929 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
930 if (rcStrict2 != VINF_SUCCESS) \
931 return rcStrict2; \
932 } while (0)
933# else
934# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
935# endif
936
937
938# ifndef IEM_WITH_SETJMP
939
940/**
941 * Fetches the next opcode word.
942 *
943 * @returns Strict VBox status code.
944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
945 * @param pu16 Where to return the opcode word.
946 */
947DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
948{
949 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
950 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
951 {
952 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
953# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
954 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
955# else
956 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
957# endif
958 return VINF_SUCCESS;
959 }
960 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
961}
962
963# else /* IEM_WITH_SETJMP */
964
965/**
966 * Fetches the next opcode word, longjmp on error.
967 *
968 * @returns The opcode word.
969 * @param pVCpu The cross context virtual CPU structure of the calling thread.
970 */
971DECL_INLINE_THROW(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
972{
973# ifdef IEM_WITH_CODE_TLB
974 uint16_t u16Ret;
975 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
976 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
977 if (RT_LIKELY( pbBuf != NULL
978 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
979 {
980 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
981# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
982 u16Ret = *(uint16_t const *)&pbBuf[offBuf];
983# else
984 u16Ret = RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
985# endif
986 }
987 else
988 u16Ret = iemOpcodeGetNextU16SlowJmp(pVCpu);
989
990# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
991 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
992 Assert(offOpcode + 1 < sizeof(pVCpu->iem.s.abOpcode));
993# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
994 *(uint16_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u16Ret;
995# else
996 pVCpu->iem.s.abOpcode[offOpcode] = RT_LO_U8(u16Ret);
997 pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_HI_U8(u16Ret);
998# endif
999 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)2;
1000# endif
1001
1002 return u16Ret;
1003
1004# else /* !IEM_WITH_CODE_TLB */
1005 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1006 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
1007 {
1008 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
1009# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1010 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1011# else
1012 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1013# endif
1014 }
1015 return iemOpcodeGetNextU16SlowJmp(pVCpu);
1016# endif /* !IEM_WITH_CODE_TLB */
1017}
1018
1019# endif /* IEM_WITH_SETJMP */
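
/*
 * Illustrative note: RT_MAKE_U16 assembles the word from the low byte and the
 * high byte, which matches the unaligned 16-bit read on a little-endian host.
 * The sketch below is not part of IEM and the helper name is made up.
 */
#if 0 /* standalone demo only */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* x86 immediates are little endian: the first opcode byte is the low byte. */
static uint16_t demoMakeU16(uint8_t bLo, uint8_t bHi)
{
    return (uint16_t)(bLo | ((uint16_t)bHi << 8));
}

int main(void)
{
    uint8_t const abOpcode[2] = { 0x34, 0x12 };

    uint16_t const uFromBytes = demoMakeU16(abOpcode[0], abOpcode[1]);

    uint16_t uFromMemcpy;               /* safe stand-in for the unaligned access path */
    memcpy(&uFromMemcpy, abOpcode, sizeof(uFromMemcpy));

    /* Both yield 0x1234 on a little-endian host. */
    printf("%#x %#x\n", (unsigned)uFromBytes, (unsigned)uFromMemcpy);
    return 0;
}
#endif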
1020
1021/**
1022 * Fetches the next opcode word, returns automatically on failure.
1023 *
1024 * @param a_pu16 Where to return the opcode word.
1025 * @remark Implicitly references pVCpu.
1026 */
1027# ifndef IEM_WITH_SETJMP
1028# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1029 do \
1030 { \
1031 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
1032 if (rcStrict2 != VINF_SUCCESS) \
1033 return rcStrict2; \
1034 } while (0)
1035# else
1036# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
1037# endif
1038
1039# ifndef IEM_WITH_SETJMP
1040/**
1041 * Fetches the next opcode word, zero extending it to a double word.
1042 *
1043 * @returns Strict VBox status code.
1044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1045 * @param pu32 Where to return the opcode double word.
1046 */
1047DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1048{
1049 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1050 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
1051 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
1052
1053 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1054 pVCpu->iem.s.offOpcode = offOpcode + 2;
1055 return VINF_SUCCESS;
1056}
1057# endif /* !IEM_WITH_SETJMP */
1058
1059/**
1060 * Fetches the next opcode word and zero extends it to a double word, returns
1061 * automatically on failure.
1062 *
1063 * @param a_pu32 Where to return the opcode double word.
1064 * @remark Implicitly references pVCpu.
1065 */
1066# ifndef IEM_WITH_SETJMP
1067# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1068 do \
1069 { \
1070 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
1071 if (rcStrict2 != VINF_SUCCESS) \
1072 return rcStrict2; \
1073 } while (0)
1074# else
1075# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
1076# endif
1077
1078# ifndef IEM_WITH_SETJMP
1079/**
1080 * Fetches the next opcode word, zero extending it to a quad word.
1081 *
1082 * @returns Strict VBox status code.
1083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1084 * @param pu64 Where to return the opcode quad word.
1085 */
1086DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1087{
1088 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1089 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
1090 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
1091
1092 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1093 pVCpu->iem.s.offOpcode = offOpcode + 2;
1094 return VINF_SUCCESS;
1095}
1096# endif /* !IEM_WITH_SETJMP */
1097
1098/**
1099 * Fetches the next opcode word and zero extends it to a quad word, returns
1100 * automatically on failure.
1101 *
1102 * @param a_pu64 Where to return the opcode quad word.
1103 * @remark Implicitly references pVCpu.
1104 */
1105# ifndef IEM_WITH_SETJMP
1106# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1107 do \
1108 { \
1109 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
1110 if (rcStrict2 != VINF_SUCCESS) \
1111 return rcStrict2; \
1112 } while (0)
1113# else
1114# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
1115# endif
1116
1117
1118# ifndef IEM_WITH_SETJMP
1119/**
1120 * Fetches the next signed word from the opcode stream.
1121 *
1122 * @returns Strict VBox status code.
1123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1124 * @param pi16 Where to return the signed word.
1125 */
1126DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16) RT_NOEXCEPT
1127{
1128 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
1129}
1130# endif /* !IEM_WITH_SETJMP */
1131
1132
1133/**
1134 * Fetches the next signed word from the opcode stream, returning automatically
1135 * on failure.
1136 *
1137 * @param a_pi16 Where to return the signed word.
1138 * @remark Implicitly references pVCpu.
1139 */
1140# ifndef IEM_WITH_SETJMP
1141# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1142 do \
1143 { \
1144 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
1145 if (rcStrict2 != VINF_SUCCESS) \
1146 return rcStrict2; \
1147 } while (0)
1148# else
1149# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
1150# endif
1151
1152# ifndef IEM_WITH_SETJMP
1153
1154/**
1155 * Fetches the next opcode dword.
1156 *
1157 * @returns Strict VBox status code.
1158 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1159 * @param pu32 Where to return the opcode double word.
1160 */
1161DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1162{
1163 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1164 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
1165 {
1166 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
1167# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1168 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1169# else
1170 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1171 pVCpu->iem.s.abOpcode[offOpcode + 1],
1172 pVCpu->iem.s.abOpcode[offOpcode + 2],
1173 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1174# endif
1175 return VINF_SUCCESS;
1176 }
1177 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
1178}
1179
1180# else /* IEM_WITH_SETJMP */
1181
1182/**
1183 * Fetches the next opcode dword, longjmp on error.
1184 *
1185 * @returns The opcode dword.
1186 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1187 */
1188DECL_INLINE_THROW(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1189{
1190# ifdef IEM_WITH_CODE_TLB
1191 uint32_t u32Ret;
1192 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1193 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1194 if (RT_LIKELY( pbBuf != NULL
1195 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
1196 {
1197 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
1198# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1199 u32Ret = *(uint32_t const *)&pbBuf[offBuf];
1200# else
1201 u32Ret = RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
1202 pbBuf[offBuf + 1],
1203 pbBuf[offBuf + 2],
1204 pbBuf[offBuf + 3]);
1205# endif
1206 }
1207 else
1208 u32Ret = iemOpcodeGetNextU32SlowJmp(pVCpu);
1209
1210# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
1211 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1212 Assert(offOpcode + 3 < sizeof(pVCpu->iem.s.abOpcode));
1213# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1214 *(uint32_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u32Ret;
1215# else
1216 pVCpu->iem.s.abOpcode[offOpcode] = RT_BYTE1(u32Ret);
1217 pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_BYTE2(u32Ret);
1218 pVCpu->iem.s.abOpcode[offOpcode + 2] = RT_BYTE3(u32Ret);
1219 pVCpu->iem.s.abOpcode[offOpcode + 3] = RT_BYTE4(u32Ret);
1220# endif
1221 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)4;
1222# endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */
1223
1224 return u32Ret;
1225
1226# else /* !IEM_WITH_CODE_TLB */
1227 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1228 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
1229 {
1230 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
1231# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1232 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1233# else
1234 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1235 pVCpu->iem.s.abOpcode[offOpcode + 1],
1236 pVCpu->iem.s.abOpcode[offOpcode + 2],
1237 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1238# endif
1239 }
1240 return iemOpcodeGetNextU32SlowJmp(pVCpu);
1241# endif
1242}
1243
1244# endif /* IEM_WITH_SETJMP */
1245
1246/**
1247 * Fetches the next opcode dword, returns automatically on failure.
1248 *
1249 * @param a_pu32 Where to return the opcode dword.
1250 * @remark Implicitly references pVCpu.
1251 */
1252# ifndef IEM_WITH_SETJMP
1253# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1254 do \
1255 { \
1256 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
1257 if (rcStrict2 != VINF_SUCCESS) \
1258 return rcStrict2; \
1259 } while (0)
1260# else
1261# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
1262# endif
1263
1264# ifndef IEM_WITH_SETJMP
1265/**
1266 * Fetches the next opcode dword, zero extending it to a quad word.
1267 *
1268 * @returns Strict VBox status code.
1269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1270 * @param pu64 Where to return the opcode quad word.
1271 */
1272DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1273{
1274 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1275 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
1276 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
1277
1278 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1279 pVCpu->iem.s.abOpcode[offOpcode + 1],
1280 pVCpu->iem.s.abOpcode[offOpcode + 2],
1281 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1282 pVCpu->iem.s.offOpcode = offOpcode + 4;
1283 return VINF_SUCCESS;
1284}
1285# endif /* !IEM_WITH_SETJMP */
1286
1287/**
1288 * Fetches the next opcode dword and zero extends it to a quad word, returns
1289 * automatically on failure.
1290 *
1291 * @param a_pu64 Where to return the opcode quad word.
1292 * @remark Implicitly references pVCpu.
1293 */
1294# ifndef IEM_WITH_SETJMP
1295# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1296 do \
1297 { \
1298 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
1299 if (rcStrict2 != VINF_SUCCESS) \
1300 return rcStrict2; \
1301 } while (0)
1302# else
1303# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
1304# endif
1305
1306
1307# ifndef IEM_WITH_SETJMP
1308/**
1309 * Fetches the next signed double word from the opcode stream.
1310 *
1311 * @returns Strict VBox status code.
1312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1313 * @param pi32 Where to return the signed double word.
1314 */
1315DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32) RT_NOEXCEPT
1316{
1317 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
1318}
1319# endif
1320
1321/**
1322 * Fetches the next signed double word from the opcode stream, returning
1323 * automatically on failure.
1324 *
1325 * @param a_pi32 Where to return the signed double word.
1326 * @remark Implicitly references pVCpu.
1327 */
1328# ifndef IEM_WITH_SETJMP
1329# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1330 do \
1331 { \
1332 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
1333 if (rcStrict2 != VINF_SUCCESS) \
1334 return rcStrict2; \
1335 } while (0)
1336# else
1337# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
1338# endif
1339
1340# ifndef IEM_WITH_SETJMP
1341/**
1342 * Fetches the next opcode dword, sign extending it into a quad word.
1343 *
1344 * @returns Strict VBox status code.
1345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1346 * @param pu64 Where to return the opcode quad word.
1347 */
1348DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1349{
1350 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1351 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
1352 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
1353
1354 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1355 pVCpu->iem.s.abOpcode[offOpcode + 1],
1356 pVCpu->iem.s.abOpcode[offOpcode + 2],
1357 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1358 *pu64 = (uint64_t)(int64_t)i32;
1359 pVCpu->iem.s.offOpcode = offOpcode + 4;
1360 return VINF_SUCCESS;
1361}
1362# endif /* !IEM_WITH_SETJMP */
1363
1364/**
1365 * Fetches the next opcode double word and sign extends it to a quad word,
1366 * returns automatically on failure.
1367 *
1368 * @param a_pu64 Where to return the opcode quad word.
1369 * @remark Implicitly references pVCpu.
1370 */
1371# ifndef IEM_WITH_SETJMP
1372# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1373 do \
1374 { \
1375 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
1376 if (rcStrict2 != VINF_SUCCESS) \
1377 return rcStrict2; \
1378 } while (0)
1379# else
1380# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
1381# endif
1382
1383# ifndef IEM_WITH_SETJMP
1384
1385/**
1386 * Fetches the next opcode qword.
1387 *
1388 * @returns Strict VBox status code.
1389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1390 * @param pu64 Where to return the opcode qword.
1391 */
1392DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1393{
1394 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1395 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
1396 {
1397# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1398 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1399# else
1400 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1401 pVCpu->iem.s.abOpcode[offOpcode + 1],
1402 pVCpu->iem.s.abOpcode[offOpcode + 2],
1403 pVCpu->iem.s.abOpcode[offOpcode + 3],
1404 pVCpu->iem.s.abOpcode[offOpcode + 4],
1405 pVCpu->iem.s.abOpcode[offOpcode + 5],
1406 pVCpu->iem.s.abOpcode[offOpcode + 6],
1407 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1408# endif
1409 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
1410 return VINF_SUCCESS;
1411 }
1412 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
1413}
1414
1415# else /* IEM_WITH_SETJMP */
1416
1417/**
1418 * Fetches the next opcode qword, longjmp on error.
1419 *
1420 * @returns The opcode qword.
1421 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1422 */
1423DECL_INLINE_THROW(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1424{
1425# ifdef IEM_WITH_CODE_TLB
1426 uint64_t u64Ret;
1427 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1428 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1429 if (RT_LIKELY( pbBuf != NULL
1430 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
1431 {
1432 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
1433# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1434 u64Ret = *(uint64_t const *)&pbBuf[offBuf];
1435# else
1436 u64Ret = RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
1437 pbBuf[offBuf + 1],
1438 pbBuf[offBuf + 2],
1439 pbBuf[offBuf + 3],
1440 pbBuf[offBuf + 4],
1441 pbBuf[offBuf + 5],
1442 pbBuf[offBuf + 6],
1443 pbBuf[offBuf + 7]);
1444# endif
1445 }
1446 else
1447 u64Ret = iemOpcodeGetNextU64SlowJmp(pVCpu);
1448
1449# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
1450 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1451 Assert(offOpcode + 7 < sizeof(pVCpu->iem.s.abOpcode));
1452# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1453 *(uint64_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u64Ret;
1454# else
1455 pVCpu->iem.s.abOpcode[offOpcode] = RT_BYTE1(u64Ret);
1456 pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_BYTE2(u64Ret);
1457 pVCpu->iem.s.abOpcode[offOpcode + 2] = RT_BYTE3(u64Ret);
1458 pVCpu->iem.s.abOpcode[offOpcode + 3] = RT_BYTE4(u64Ret);
1459 pVCpu->iem.s.abOpcode[offOpcode + 4] = RT_BYTE5(u64Ret);
1460 pVCpu->iem.s.abOpcode[offOpcode + 5] = RT_BYTE6(u64Ret);
1461 pVCpu->iem.s.abOpcode[offOpcode + 6] = RT_BYTE7(u64Ret);
1462 pVCpu->iem.s.abOpcode[offOpcode + 7] = RT_BYTE8(u64Ret);
1463# endif
1464 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)8;
1465# endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */
1466
1467 return u64Ret;
1468
1469# else /* !IEM_WITH_CODE_TLB */
1470 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1471 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
1472 {
1473 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
1474# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1475 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1476# else
1477 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1478 pVCpu->iem.s.abOpcode[offOpcode + 1],
1479 pVCpu->iem.s.abOpcode[offOpcode + 2],
1480 pVCpu->iem.s.abOpcode[offOpcode + 3],
1481 pVCpu->iem.s.abOpcode[offOpcode + 4],
1482 pVCpu->iem.s.abOpcode[offOpcode + 5],
1483 pVCpu->iem.s.abOpcode[offOpcode + 6],
1484 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1485# endif
1486 }
1487 return iemOpcodeGetNextU64SlowJmp(pVCpu);
1488# endif /* !IEM_WITH_CODE_TLB */
1489}
1490
1491# endif /* IEM_WITH_SETJMP */
1492
1493/**
1494 * Fetches the next opcode quad word, returns automatically on failure.
1495 *
1496 * @param a_pu64 Where to return the opcode quad word.
1497 * @remark Implicitly references pVCpu.
1498 */
1499# ifndef IEM_WITH_SETJMP
1500# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1501 do \
1502 { \
1503 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
1504 if (rcStrict2 != VINF_SUCCESS) \
1505 return rcStrict2; \
1506 } while (0)
1507# else
1508# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
1509# endif
1510
1511/**
1512 * For fetching the opcode bytes for a ModR/M effective address, but throwing
1513 * away the result.
1514 *
1515 * This is used when decoding undefined opcodes and such where we want to avoid
1516 * unnecessary MC blocks.
1517 *
1518 * @note The recompiler code overrides this one so iemOpHlpCalcRmEffAddrJmpEx is
1519 * used instead. At least for now...
1520 */
1521# ifndef IEM_WITH_SETJMP
1522# define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
1523 RTGCPTR GCPtrEff; \
1524 VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, (a_bRm), 0, &GCPtrEff); \
1525 if (rcStrict != VINF_SUCCESS) \
1526 return rcStrict; \
1527 } while (0)
1528# else
1529# define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
1530 (void)iemOpHlpCalcRmEffAddrJmp(pVCpu, (a_bRm), 0); \
1531 } while (0)
1532# endif
1533
1534#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */
1535
1536
1537/** @name Misc Worker Functions.
1538 * @{
1539 */
1540
1541/**
1542 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1543 * not (kind of obsolete now).
1544 *
1545 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1546 */
1547#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
1548
1549/**
1550 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
1551 *
1552 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1553 * @param a_fEfl The new EFLAGS.
1554 */
1555#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
1556
1557
1558/**
1559 * Loads a NULL data selector into a selector register, both the hidden and
1560 * visible parts, in protected mode.
1561 *
1562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1563 * @param pSReg Pointer to the segment register.
1564 * @param uRpl The RPL.
1565 */
1566DECLINLINE(void) iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl) RT_NOEXCEPT
1567{
1568 /** @todo Testcase: write a testcase checking what happens when loading a NULL
1569 * data selector in protected mode. */
1570 pSReg->Sel = uRpl;
1571 pSReg->ValidSel = uRpl;
1572 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1573 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1574 {
1575 /* VT-x (Intel 3960x) observed doing something like this. */
1576 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (IEM_GET_CPL(pVCpu) << X86DESCATTR_DPL_SHIFT);
1577 pSReg->u32Limit = UINT32_MAX;
1578 pSReg->u64Base = 0;
1579 }
1580 else
1581 {
1582 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
1583 pSReg->u32Limit = 0;
1584 pSReg->u64Base = 0;
1585 }
1586}
1587
1588/** @} */
1589
1590
1591/*
1592 *
1593 * Helper routines.
1594 * Helper routines.
1595 * Helper routines.
1596 *
1597 */
1598
1599#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1600
1601/**
1602 * Recalculates the effective operand size.
1603 *
1604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1605 */
1606DECLINLINE(void) iemRecalEffOpSize(PVMCPUCC pVCpu) RT_NOEXCEPT
1607{
1608 switch (IEM_GET_CPU_MODE(pVCpu))
1609 {
1610 case IEMMODE_16BIT:
1611 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
1612 break;
1613 case IEMMODE_32BIT:
1614 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
1615 break;
1616 case IEMMODE_64BIT:
1617 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
1618 {
1619 case 0:
1620 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
1621 break;
1622 case IEM_OP_PRF_SIZE_OP:
1623 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1624 break;
1625 case IEM_OP_PRF_SIZE_REX_W:
1626 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
1627 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1628 break;
1629 }
1630 break;
1631 default:
1632 AssertFailed();
1633 }
1634}
1635
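/*
 * Illustrative note: the 64-bit branch of the switch above, reduced to a tiny
 * helper.  Not part of IEM; the prefix flag values are stand-ins, not the real
 * IEM_OP_PRF_* bits.
 */
#if 0 /* standalone demo only */
#include <stdio.h>

#define DEMO_PRF_SIZE_OP     0x01u  /* a 0x66 operand-size prefix was seen (assumed flag) */
#define DEMO_PRF_SIZE_REX_W  0x02u  /* REX.W was seen (assumed flag) */

/* In 64-bit mode REX.W forces 64-bit operands and beats 0x66; 0x66 alone gives
   16-bit operands; otherwise the default operand size applies. */
static unsigned demoEffOpSize64(unsigned fPrefixes, unsigned cDefaultBits)
{
    if (fPrefixes & DEMO_PRF_SIZE_REX_W)
        return 64;
    if (fPrefixes & DEMO_PRF_SIZE_OP)
        return 16;
    return cDefaultBits;
}

int main(void)
{
    printf("%u %u %u %u\n",
           demoEffOpSize64(0, 32),                                       /* 32 */
           demoEffOpSize64(DEMO_PRF_SIZE_OP, 32),                        /* 16 */
           demoEffOpSize64(DEMO_PRF_SIZE_REX_W, 32),                     /* 64 */
           demoEffOpSize64(DEMO_PRF_SIZE_REX_W | DEMO_PRF_SIZE_OP, 32)); /* 64 */
    return 0;
}
#endif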
1636
1637/**
1638 * Sets the default operand size to 64-bit and recalculates the effective
1639 * operand size.
1640 *
1641 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1642 */
1643DECLINLINE(void) iemRecalEffOpSize64Default(PVMCPUCC pVCpu) RT_NOEXCEPT
1644{
1645 Assert(IEM_IS_64BIT_CODE(pVCpu));
1646 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
1647 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
1648 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1649 else
1650 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1651}
1652
1653
1654/**
1655 * Sets the default operand size to 64-bit and recalculates the effective
1656 * operand size, with Intel ignoring any operand size prefix (AMD respects it).
1657 *
1658 * This is for the relative jumps.
1659 *
1660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1661 */
1662DECLINLINE(void) iemRecalEffOpSize64DefaultAndIntelIgnoresOpSizePrefix(PVMCPUCC pVCpu) RT_NOEXCEPT
1663{
1664 Assert(IEM_IS_64BIT_CODE(pVCpu));
1665 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
1666 if ( (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP
1667 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
1668 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1669 else
1670 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1671}
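
/*
 * A tiny standalone sketch of the vendor difference above: for relative jumps
 * in 64-bit code Intel ignores a lone 0x66 operand-size prefix (64-bit stays
 * in effect), while AMD honours it (16-bit). The names are illustrative
 * assumptions.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdbool.h>

static unsigned exCalcJumpOpSizeBits64(bool fHasRexW, bool fHas066Prefix, bool fIsIntel)
{
    if (fHasRexW || !fHas066Prefix || fIsIntel)
        return 64;      /* REX.W present, no 0x66 at all, or Intel ignoring 0x66. */
    return 16;          /* AMD with a lone 0x66 prefix. */
}
#endif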
1672
1673#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */
1674
1675
1676
1677/** @name Register Access.
1678 * @{
1679 */
1680
1681/**
1682 * Gets a reference (pointer) to the specified hidden segment register.
1683 *
1684 * @returns Hidden register reference.
1685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1686 * @param iSegReg The segment register.
1687 */
1688DECL_FORCE_INLINE(PCPUMSELREG) iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1689{
1690 Assert(iSegReg < X86_SREG_COUNT);
1691 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1692 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1693
1694 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
1695 return pSReg;
1696}
1697
1698
1699/**
1700 * Ensures that the given hidden segment register is up to date.
1701 *
1702 * @returns Hidden register reference.
1703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1704 * @param pSReg The segment register.
1705 */
1706DECL_FORCE_INLINE(PCPUMSELREG) iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg) RT_NOEXCEPT
1707{
1708 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
1709 NOREF(pVCpu);
1710 return pSReg;
1711}
1712
1713
1714/**
1715 * Gets a reference (pointer) to the specified segment register (the selector
1716 * value).
1717 *
1718 * @returns Pointer to the selector variable.
1719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1720 * @param iSegReg The segment register.
1721 */
1722DECL_FORCE_INLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1723{
1724 Assert(iSegReg < X86_SREG_COUNT);
1725 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1726 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
1727}
1728
1729
1730/**
1731 * Fetches the selector value of a segment register.
1732 *
1733 * @returns The selector value.
1734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1735 * @param iSegReg The segment register.
1736 */
1737DECL_FORCE_INLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1738{
1739 Assert(iSegReg < X86_SREG_COUNT);
1740 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1741 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
1742}
1743
1744
1745/**
1746 * Fetches the base address value of a segment register.
1747 *
1748 * @returns The base address value.
1749 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1750 * @param iSegReg The segment register.
1751 */
1752DECL_FORCE_INLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1753{
1754 Assert(iSegReg < X86_SREG_COUNT);
1755 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1756 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
1757}
1758
1759
1760/**
1761 * Gets a reference (pointer) to the specified general purpose register.
1762 *
1763 * @returns Register reference.
1764 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1765 * @param iReg The general purpose register.
1766 */
1767DECL_FORCE_INLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1768{
1769 Assert(iReg < 16);
1770 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
1771}
1772
1773
1774#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1775/**
1776 * Gets a reference (pointer) to the specified 8-bit general purpose register.
1777 *
1778 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
1779 *
1780 * @returns Register reference.
1781 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1782 * @param iReg The register.
1783 */
1784DECL_FORCE_INLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1785{
1786 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REX | IEM_OP_PRF_VEX)))
1787 {
1788 Assert(iReg < 16);
1789 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
1790 }
1791 /* high 8-bit register. */
1792 Assert(iReg < 8);
1793 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
1794}
1795#endif
1796
1797
1798/**
1799 * Gets a reference (pointer) to the specified 8-bit general purpose register,
1800 * alternative version with extended (20) register index.
1801 *
1802 * @returns Register reference.
1803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1804 * @param iRegEx The register. The first 16 are regular ones,
1805 * whereas 16 thru 19 map to AH, CH, DH and BH.
1806 */
1807DECL_FORCE_INLINE(uint8_t *) iemGRegRefU8Ex(PVMCPUCC pVCpu, uint8_t iRegEx) RT_NOEXCEPT
1808{
1809 /** @todo This could be done by double indexing on little endian hosts:
1810 * return &pVCpu->cpum.GstCtx.aGRegs[iRegEx & 15].ab[iRegEx >> 4]; */
1811 if (iRegEx < 16)
1812 return &pVCpu->cpum.GstCtx.aGRegs[iRegEx].u8;
1813
1814 /* high 8-bit register. */
1815 Assert(iRegEx < 20);
1816 return &pVCpu->cpum.GstCtx.aGRegs[iRegEx & 3].bHi;
1817}
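
/*
 * A small standalone sketch of the 8-bit register index mapping used above:
 * indices 0..15 select the low byte of GPR 0..15, while 16..19 select the
 * high byte (AH, CH, DH, BH) of GPR 0..3. The union and names are illustrative
 * assumptions for a little-endian host, where the high byte sits at offset 1.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdint.h>
#include <assert.h>

typedef union EXGREG { uint64_t u64; uint8_t ab[8]; } EXGREG;

static uint8_t *exGRegRefU8Ex(EXGREG *paGRegs, unsigned iRegEx)
{
    assert(iRegEx < 20);
    if (iRegEx < 16)
        return &paGRegs[iRegEx].ab[0];      /* AL, CL, DL, BL, SPL, BPL, SIL, DIL, R8B..R15B */
    return &paGRegs[iRegEx & 3].ab[1];      /* AH, CH, DH, BH */
    /* The branch-free form hinted at by the @todo above would be:
     *   return &paGRegs[iRegEx & 15].ab[iRegEx >> 4]; */
}
#endif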
1818
1819
1820/**
1821 * Gets a reference (pointer) to the specified 16-bit general purpose register.
1822 *
1823 * @returns Register reference.
1824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1825 * @param iReg The register.
1826 */
1827DECL_FORCE_INLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1828{
1829 Assert(iReg < 16);
1830 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
1831}
1832
1833
1834/**
1835 * Gets a reference (pointer) to the specified 32-bit general purpose register.
1836 *
1837 * @returns Register reference.
1838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1839 * @param iReg The register.
1840 */
1841DECL_FORCE_INLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1842{
1843 Assert(iReg < 16);
1844 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1845}
1846
1847
1848/**
1849 * Gets a reference (pointer) to the specified signed 32-bit general purpose register.
1850 *
1851 * @returns Register reference.
1852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1853 * @param iReg The register.
1854 */
1855DECL_FORCE_INLINE(int32_t *) iemGRegRefI32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1856{
1857 Assert(iReg < 16);
1858 return (int32_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1859}
1860
1861
1862/**
1863 * Gets a reference (pointer) to the specified 64-bit general purpose register.
1864 *
1865 * @returns Register reference.
1866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1867 * @param iReg The register.
1868 */
1869DECL_FORCE_INLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1870{
1871 Assert(iReg < 16);
1872 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1873}
1874
1875
1876/**
1877 * Gets a reference (pointer) to the specified signed 64-bit general purpose register.
1878 *
1879 * @returns Register reference.
1880 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1881 * @param iReg The register.
1882 */
1883DECL_FORCE_INLINE(int64_t *) iemGRegRefI64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1884{
1885 Assert(iReg < 16);
1886 return (int64_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1887}
1888
1889
1890/**
1891 * Gets a reference (pointer) to the specified segment register's base address.
1892 *
1893 * @returns Segment register base address reference.
1894 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1895 * @param iSegReg The segment selector.
1896 */
1897DECL_FORCE_INLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1898{
1899 Assert(iSegReg < X86_SREG_COUNT);
1900 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1901 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
1902}
1903
1904
1905#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1906/**
1907 * Fetches the value of an 8-bit general purpose register.
1908 *
1909 * @returns The register value.
1910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1911 * @param iReg The register.
1912 */
1913DECL_FORCE_INLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1914{
1915 return *iemGRegRefU8(pVCpu, iReg);
1916}
1917#endif
1918
1919
1920/**
1921 * Fetches the value of an 8-bit general purpose register, alternative version
1922 * with extended (20) register index.
1923 *
1924 * @returns The register value.
1925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1926 * @param iRegEx The register. The first 16 are regular ones,
1927 * whereas 16 thru 19 map to AH, CH, DH and BH.
1928 */
1929DECL_FORCE_INLINE(uint8_t) iemGRegFetchU8Ex(PVMCPUCC pVCpu, uint8_t iRegEx) RT_NOEXCEPT
1930{
1931 return *iemGRegRefU8Ex(pVCpu, iRegEx);
1932}
1933
1934
1935/**
1936 * Fetches the value of a 16-bit general purpose register.
1937 *
1938 * @returns The register value.
1939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1940 * @param iReg The register.
1941 */
1942DECL_FORCE_INLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1943{
1944 Assert(iReg < 16);
1945 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
1946}
1947
1948
1949/**
1950 * Fetches the value of a 32-bit general purpose register.
1951 *
1952 * @returns The register value.
1953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1954 * @param iReg The register.
1955 */
1956DECL_FORCE_INLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1957{
1958 Assert(iReg < 16);
1959 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1960}
1961
1962
1963/**
1964 * Fetches the value of a 64-bit general purpose register.
1965 *
1966 * @returns The register value.
1967 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1968 * @param iReg The register.
1969 */
1970DECL_FORCE_INLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1971{
1972 Assert(iReg < 16);
1973 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1974}
1975
1976
1977/**
1978 * Stores a 16-bit value to a general purpose register.
1979 *
1980 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1981 * @param iReg The register.
1982 * @param uValue The value to store.
1983 */
1984DECL_FORCE_INLINE(void) iemGRegStoreU16(PVMCPUCC pVCpu, uint8_t iReg, uint16_t uValue) RT_NOEXCEPT
1985{
1986 Assert(iReg < 16);
1987 pVCpu->cpum.GstCtx.aGRegs[iReg].u16 = uValue;
1988}
1989
1990
1991/**
1992 * Stores a 32-bit value to a general purpose register, implicitly clearing the
1993 * high 32 bits of the 64-bit register.
1994 *
1995 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1996 * @param iReg The register.
1997 * @param uValue The value to store.
1998 */
1999DECL_FORCE_INLINE(void) iemGRegStoreU32(PVMCPUCC pVCpu, uint8_t iReg, uint32_t uValue) RT_NOEXCEPT
2000{
2001 Assert(iReg < 16);
2002 pVCpu->cpum.GstCtx.aGRegs[iReg].u64 = uValue;
2003}
2004
2005
2006/**
2007 * Stores a 64-bit value to a general purpose register.
2008 *
2009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2010 * @param iReg The register.
2011 * @param uValue The value to store.
2012 */
2013DECL_FORCE_INLINE(void) iemGRegStoreU64(PVMCPUCC pVCpu, uint8_t iReg, uint64_t uValue) RT_NOEXCEPT
2014{
2015 Assert(iReg < 16);
2016 pVCpu->cpum.GstCtx.aGRegs[iReg].u64 = uValue;
2017}
2018
2019
2020/**
2021 * Gets the address of the top of the stack.
2022 *
2023 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2024 */
2025DECL_FORCE_INLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu) RT_NOEXCEPT
2026{
2027 if (IEM_IS_64BIT_CODE(pVCpu))
2028 return pVCpu->cpum.GstCtx.rsp;
2029 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2030 return pVCpu->cpum.GstCtx.esp;
2031 return pVCpu->cpum.GstCtx.sp;
2032}
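
/*
 * A tiny standalone sketch of the stack-pointer width selection above: 64-bit
 * code uses the full RSP, otherwise SS.D picks ESP (32-bit stack) or SP
 * (16-bit stack). The names are illustrative assumptions.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdint.h>
#include <stdbool.h>

static uint64_t exGetEffSp(uint64_t uRsp, bool f64BitCode, bool fSsDefBig)
{
    if (f64BitCode)
        return uRsp;                 /* RSP */
    if (fSsDefBig)
        return (uint32_t)uRsp;       /* ESP */
    return (uint16_t)uRsp;           /* SP */
}
#endif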
2033
2034
2035/**
2036 * Updates the RIP/EIP/IP to point to the next instruction.
2037 *
2038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2039 * @param cbInstr The number of bytes to add.
2040 */
2041DECL_FORCE_INLINE(void) iemRegAddToRip(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2042{
2043 /*
2044 * Advance RIP.
2045 *
2046 * When we're targeting 8086/8, 80186/8 or 80286 mode the updates are 16-bit,
2047 * while in all other modes except LM64 the updates are 32-bit. This means
2048 * we need to watch for both 32-bit and 16-bit "carry" situations, i.e.
2049 * 4GB and 64KB rollovers, and decide whether anything needs masking.
2050 *
2051 * See PC wrap around tests in bs3-cpu-weird-1.
2052 */
2053 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
2054 uint64_t const uRipNext = uRipPrev + cbInstr;
2055 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & (RT_BIT_64(32) | RT_BIT_64(16)))
2056 || IEM_IS_64BIT_CODE(pVCpu)))
2057 pVCpu->cpum.GstCtx.rip = uRipNext;
2058 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
2059 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
2060 else
2061 pVCpu->cpum.GstCtx.rip = (uint16_t)uRipNext;
2062}
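
/*
 * A standalone sketch of the rollover test above: XORing the old and new RIP
 * and checking bits 16 and 32 detects whether the addition crossed a 64KB or
 * 4GB boundary, the only case where truncation may be needed. The names are
 * illustrative assumptions.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdint.h>
#include <stdbool.h>

static uint64_t exAdvanceIp(uint64_t uRipPrev, uint8_t cbInstr, bool f64BitCode, bool f386OrLater)
{
    uint64_t const uRipNext = uRipPrev + cbInstr;
    /* Fast path: no 16-bit/32-bit carry, or 64-bit code where RIP isn't truncated. */
    if (!((uRipNext ^ uRipPrev) & ((UINT64_C(1) << 32) | (UINT64_C(1) << 16))) || f64BitCode)
        return uRipNext;
    /* 386 and later update EIP (32-bit), older CPUs update IP (16-bit). */
    return f386OrLater ? (uint32_t)uRipNext : (uint16_t)uRipNext;
}
#endif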
2063
2064
2065/**
2066 * Called by iemRegAddToRipAndFinishingClearingRF and others when any of the
2067 * following EFLAGS bits are set:
2068 * - X86_EFL_RF - clear it.
2069 * - CPUMCTX_INHIBIT_SHADOW (_SS/_STI) - clear them.
2070 * - X86_EFL_TF - generate single step \#DB trap.
2071 * - CPUMCTX_DBG_HIT_DR0/1/2/3 - generate \#DB trap (data or I/O, not
2072 * instruction).
2073 *
2074 * According to @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events},
2075 * a \#DB due to TF (single stepping) or a DRx non-instruction breakpoint
2076 * takes priority over both NMIs and hardware interrupts. So, neither is
2077 * considered here. (The RESET, \#MC, SMI, INIT, STOPCLK and FLUSH events are
2078 * either unsupported or will be triggered on top of any \#DB raised here.)
2079 *
2080 * The RF flag only needs to be cleared here as it only suppresses instruction
2081 * breakpoints which are not raised here (happens synchronously during
2082 * instruction fetching).
2083 *
2084 * The CPUMCTX_INHIBIT_SHADOW_SS flag will be cleared by this function, so its
2085 * status has no bearing on whether \#DB exceptions are raised.
2086 *
2087 * @note This must *NOT* be called by the two instructions setting the
2088 * CPUMCTX_INHIBIT_SHADOW_SS flag.
2089 *
2090 * @see @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events}
2091 * @see @sdmv3{077,200,6.8.3,Masking Exceptions and Interrupts When Switching
2092 * Stacks}
2093 */
2094static VBOXSTRICTRC iemFinishInstructionWithFlagsSet(PVMCPUCC pVCpu, int rcNormal) RT_NOEXCEPT
2095{
2096 /*
2097 * Normally we're just here to clear RF and/or interrupt shadow bits.
2098 */
2099 if (RT_LIKELY((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) == 0))
2100 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
2101 else
2102 {
2103 /*
2104 * Raise a #DB or/and DBGF event.
2105 */
2106 VBOXSTRICTRC rcStrict;
2107 if (pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK))
2108 {
2109 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2110 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
2111 if (pVCpu->cpum.GstCtx.eflags.uBoth & X86_EFL_TF)
2112 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS;
2113 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK) >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2114 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64\n",
2115 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
2116 pVCpu->cpum.GstCtx.rflags.uBoth));
2117
2118 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK);
2119 rcStrict = iemRaiseDebugException(pVCpu);
2120
2121 /* A DBGF event/breakpoint trumps the iemRaiseDebugException informational status code. */
2122 if ((pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK) && RT_FAILURE(rcStrict))
2123 {
2124 rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
2125 LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
2126 }
2127 }
2128 else
2129 {
2130 Assert(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK);
2131 rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
2132 LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
2133 }
2134 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_DBG_DBGF_MASK;
2135 Assert(rcStrict != VINF_SUCCESS);
2136 return rcStrict;
2137 }
2138 return rcNormal;
2139}
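
/*
 * A standalone sketch of how the pending debug state is folded into DR6 above:
 * BS is set for single stepping (EFLAGS.TF) and the DR0..DR3 hit bits recorded
 * in the extended EFLAGS word are shifted down into DR6.B0..B3. The bit layout
 * used for the hidden hit bits here is an illustrative assumption.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdint.h>

#define EX_DR6_B_MASK          UINT64_C(0x0000000f)    /* B0..B3 */
#define EX_DR6_BS              UINT64_C(0x00004000)    /* single-step trap */
#define EX_EFL_TF              UINT64_C(0x00000100)
#define EX_DBG_HIT_DRX_SHIFT   32                      /* assumed location of the hit bits */
#define EX_DBG_HIT_DRX_MASK    (UINT64_C(0xf) << EX_DBG_HIT_DRX_SHIFT)

static uint64_t exComposeDr6(uint64_t uDr6, uint64_t fEflBoth)
{
    uDr6 &= ~EX_DR6_B_MASK;                                            /* clear stale hits */
    if (fEflBoth & EX_EFL_TF)
        uDr6 |= EX_DR6_BS;                                             /* single stepping */
    uDr6 |= (fEflBoth & EX_DBG_HIT_DRX_MASK) >> EX_DBG_HIT_DRX_SHIFT;  /* DR0..DR3 hits */
    return uDr6;
}
#endif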
2140
2141
2142/**
2143 * Clears the RF and CPUMCTX_INHIBIT_SHADOW, triggering \#DB if pending.
2144 *
2145 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2146 * @param rcNormal VINF_SUCCESS to continue TB.
2147 * VINF_IEM_REEXEC_BREAK to force TB exit when
2148 * taking the wrong conditional branch.
2149 */
2150DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegFinishClearingRF(PVMCPUCC pVCpu, int rcNormal) RT_NOEXCEPT
2151{
2152 /*
2153 * We assume that most of the time nothing actually needs doing here.
2154 */
2155 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
2156 if (RT_LIKELY(!( pVCpu->cpum.GstCtx.eflags.uBoth
2157 & (X86_EFL_TF | X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) ))
2158 return rcNormal;
2159 return iemFinishInstructionWithFlagsSet(pVCpu, rcNormal);
2160}
2161
2162
2163/**
2164 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF
2165 * and CPUMCTX_INHIBIT_SHADOW.
2166 *
2167 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2168 * @param cbInstr The number of bytes to add.
2169 */
2170DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2171{
2172 iemRegAddToRip(pVCpu, cbInstr);
2173 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2174}
2175
2176
2177/**
2178 * Updates the RIP to point to the next instruction and clears EFLAGS.RF
2179 * and CPUMCTX_INHIBIT_SHADOW.
2180 *
2181 * Only called from 64-bit code.
2182 *
2183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2184 * @param cbInstr The number of bytes to add.
2185 * @param rcNormal VINF_SUCCESS to continue TB.
2186 * VINF_IEM_REEXEC_BREAK to force TB exit when
2187 * taking the wrong conditional branch.
2188 */
2189DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRip64AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
2190{
2191 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rip + cbInstr;
2192 return iemRegFinishClearingRF(pVCpu, rcNormal);
2193}
2194
2195
2196/**
2197 * Updates the EIP to point to the next instruction and clears EFLAGS.RF and
2198 * CPUMCTX_INHIBIT_SHADOW.
2199 *
2200 * This is never called from 64-bit code.
2201 *
2202 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2203 * @param cbInstr The number of bytes to add.
2204 * @param rcNormal VINF_SUCCESS to continue TB.
2205 * VINF_IEM_REEXEC_BREAK to force TB exit when
2206 * taking the wrong conditional branch.
2207 */
2208DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToEip32AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
2209{
2210 pVCpu->cpum.GstCtx.rip = (uint32_t)(pVCpu->cpum.GstCtx.eip + cbInstr);
2211 return iemRegFinishClearingRF(pVCpu, rcNormal);
2212}
2213
2214
2215/**
2216 * Updates the IP to point to the next instruction and clears EFLAGS.RF and
2217 * CPUMCTX_INHIBIT_SHADOW.
2218 *
2219 * This is only ever used from 16-bit code on a pre-386 CPU.
2220 *
2221 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2222 * @param cbInstr The number of bytes to add.
2223 * @param rcNormal VINF_SUCCESS to continue TB.
2224 * VINF_IEM_REEXEC_BREAK to force TB exit when
2225 * taking the wrong conditional branch.
2226 */
2227DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToIp16AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
2228{
2229 pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr);
2230 return iemRegFinishClearingRF(pVCpu, rcNormal);
2231}
2232
2233
2234/**
2235 * Tail method for a finish function that doesn't clear flags or raise \#DB.
2236 *
2237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2238 * @param rcNormal VINF_SUCCESS to continue TB.
2239 * VINF_IEM_REEXEC_BREAK to force TB exit when
2240 * taking the wrong conditional branch.
2241 */
2242DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegFinishNoFlags(PVMCPUCC pVCpu, int rcNormal) RT_NOEXCEPT
2243{
2244 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
2245 Assert(!( pVCpu->cpum.GstCtx.eflags.uBoth
2246 & (X86_EFL_TF | X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) );
2247 RT_NOREF(pVCpu);
2248 return rcNormal;
2249}
2250
2251
2252/**
2253 * Updates the RIP to point to the next instruction, but does not need to clear
2254 * EFLAGS.RF or CPUMCTX_INHIBIT_SHADOW nor check for debug flags.
2255 *
2256 * Only called from 64-bit code.
2257 *
2258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2259 * @param cbInstr The number of bytes to add.
2260 * @param rcNormal VINF_SUCCESS to continue TB.
2261 * VINF_IEM_REEXEC_BREAK to force TB exit when
2262 * taking the wrong conditional branch.
2263 */
2264DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRip64AndFinishingNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
2265{
2266 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rip + cbInstr;
2267 return iemRegFinishNoFlags(pVCpu, rcNormal);
2268}
2269
2270
2271/**
2272 * Updates the EIP to point to the next instruction, but does not need to clear
2273 * EFLAGS.RF or CPUMCTX_INHIBIT_SHADOW nor check for debug flags.
2274 *
2275 * This is never called from 64-bit code.
2276 *
2277 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2278 * @param cbInstr The number of bytes to add.
2279 * @param rcNormal VINF_SUCCESS to continue TB.
2280 * VINF_IEM_REEXEC_BREAK to force TB exit when
2281 * taking the wrong conditional branhc.
2282 */
2283DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToEip32AndFinishingNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
2284{
2285 pVCpu->cpum.GstCtx.rip = (uint32_t)(pVCpu->cpum.GstCtx.eip + cbInstr);
2286 return iemRegFinishNoFlags(pVCpu, rcNormal);
2287}
2288
2289
2290/**
2291 * Updates the IP to point to the next instruction, but does not need to clear
2292 * EFLAGS.RF or CPUMCTX_INHIBIT_SHADOW nor check for debug flags.
2293 *
2294 * This is only ever used from 16-bit code on a pre-386 CPU.
2295 *
2296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2297 * @param cbInstr The number of bytes to add.
2298 * @param rcNormal VINF_SUCCESS to continue TB.
2299 * VINF_IEM_REEXEC_BREAK to force TB exit when
2300 * taking the wrong conditional branch.
2301 *
2302 */
2303DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToIp16AndFinishingNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
2304{
2305 pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr);
2306 return iemRegFinishNoFlags(pVCpu, rcNormal);
2307}
2308
2309
2310/**
2311 * Adds an 8-bit signed jump offset to RIP from 64-bit code.
2312 *
2313 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2314 * segment limit.
2315 *
2316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2317 * @param cbInstr Instruction size.
2318 * @param offNextInstr The offset of the next instruction.
2319 * @param enmEffOpSize Effective operand size.
2320 * @param rcNormal VINF_SUCCESS to continue TB.
2321 * VINF_IEM_REEXEC_BREAK to force TB exit when
2322 * taking the wrong conditional branch.
2323 */
2324DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2325 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
2326{
2327 Assert(IEM_IS_64BIT_CODE(pVCpu));
2328 Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);
2329
2330 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2331 if (enmEffOpSize == IEMMODE_16BIT)
2332 uNewRip &= UINT16_MAX;
2333
2334 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2335 pVCpu->cpum.GstCtx.rip = uNewRip;
2336 else
2337 return iemRaiseGeneralProtectionFault0(pVCpu);
2338
2339#ifndef IEM_WITH_CODE_TLB
2340 iemOpcodeFlushLight(pVCpu, cbInstr);
2341#endif
2342
2343 /*
2344 * Clear RF and finish the instruction (maybe raise #DB).
2345 */
2346 return iemRegFinishClearingRF(pVCpu, rcNormal);
2347}
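
/*
 * A standalone sketch of the canonical-address test behind the IEM_IS_CANONICAL
 * check used by the 64-bit jump helpers: with 48-bit virtual addresses, bits
 * 63:47 must all equal bit 47, i.e. the address must sign-extend cleanly. The
 * helper name is an illustrative assumption.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdint.h>
#include <stdbool.h>

static bool exIsCanonical48(uint64_t uAddr)
{
    uint64_t const uTop = uAddr >> 47;              /* bits 63:47 (17 bits) */
    return uTop == 0 || uTop == UINT64_C(0x1ffff);  /* all clear or all set */
}
#endif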
2348
2349
2350/**
2351 * Adds an 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
2352 * code (never 64-bit).
2353 *
2354 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2355 * segment limit.
2356 *
2357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2358 * @param cbInstr Instruction size.
2359 * @param offNextInstr The offset of the next instruction.
2360 * @param enmEffOpSize Effective operand size.
2361 * @param rcNormal VINF_SUCCESS to continue TB.
2362 * VINF_IEM_REEXEC_BREAK to force TB exit when
2363 * taking the wrong conditional branch.
2364 */
2365DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2366 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
2367{
2368 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2369 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2370
2371 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
2372 if (enmEffOpSize == IEMMODE_16BIT)
2373 uNewEip &= UINT16_MAX;
2374 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2375 pVCpu->cpum.GstCtx.rip = uNewEip;
2376 else
2377 return iemRaiseGeneralProtectionFault0(pVCpu);
2378
2379#ifndef IEM_WITH_CODE_TLB
2380 iemOpcodeFlushLight(pVCpu, cbInstr);
2381#endif
2382
2383 /*
2384 * Clear RF and finish the instruction (maybe raise #DB).
2385 */
2386 return iemRegFinishClearingRF(pVCpu, rcNormal);
2387}
2388
2389
2390/**
2391 * Adds an 8-bit signed jump offset to IP, on a pre-386 CPU.
2392 *
2393 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2394 * segment limit.
2395 *
2396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2397 * @param cbInstr Instruction size.
2398 * @param offNextInstr The offset of the next instruction.
2399 * @param rcNormal VINF_SUCCESS to continue TB.
2400 * VINF_IEM_REEXEC_BREAK to force TB exit when
2401 * taking the wrong conditional branch.
2402 */
2403DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegIp16RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2404 int8_t offNextInstr, int rcNormal) RT_NOEXCEPT
2405{
2406 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2407
2408 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
2409 if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
2410 pVCpu->cpum.GstCtx.rip = uNewIp;
2411 else
2412 return iemRaiseGeneralProtectionFault0(pVCpu);
2413
2414#ifndef IEM_WITH_CODE_TLB
2415 iemOpcodeFlushLight(pVCpu, cbInstr);
2416#endif
2417
2418 /*
2419 * Clear RF and finish the instruction (maybe raise #DB).
2420 */
2421 return iemRegFinishClearingRF(pVCpu, rcNormal);
2422}
2423
2424
2425/**
2426 * Adds an 8-bit signed jump offset to RIP from 64-bit code, no checking or
2427 * clearing of flags.
2428 *
2429 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2430 * segment limit.
2431 *
2432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2433 * @param cbInstr Instruction size.
2434 * @param offNextInstr The offset of the next instruction.
2435 * @param enmEffOpSize Effective operand size.
2436 * @param rcNormal VINF_SUCCESS to continue TB.
2437 * VINF_IEM_REEXEC_BREAK to force TB exit when
2438 * taking the wrong conditional branch.
2439 */
2440DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2441 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
2442{
2443 Assert(IEM_IS_64BIT_CODE(pVCpu));
2444 Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);
2445
2446 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2447 if (enmEffOpSize == IEMMODE_16BIT)
2448 uNewRip &= UINT16_MAX;
2449
2450 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2451 pVCpu->cpum.GstCtx.rip = uNewRip;
2452 else
2453 return iemRaiseGeneralProtectionFault0(pVCpu);
2454
2455#ifndef IEM_WITH_CODE_TLB
2456 iemOpcodeFlushLight(pVCpu, cbInstr);
2457#endif
2458 return iemRegFinishNoFlags(pVCpu, rcNormal);
2459}
2460
2461
2462/**
2463 * Adds an 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
2464 * code (never 64-bit), no checking or clearing of flags.
2465 *
2466 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2467 * segment limit.
2468 *
2469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2470 * @param cbInstr Instruction size.
2471 * @param offNextInstr The offset of the next instruction.
2472 * @param enmEffOpSize Effective operand size.
2473 * @param rcNormal VINF_SUCCESS to continue TB.
2474 * VINF_IEM_REEXEC_BREAK to force TB exit when
2475 * taking the wrong conditional branch.
2476 */
2477DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2478 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
2479{
2480 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2481 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2482
2483 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
2484 if (enmEffOpSize == IEMMODE_16BIT)
2485 uNewEip &= UINT16_MAX;
2486 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2487 pVCpu->cpum.GstCtx.rip = uNewEip;
2488 else
2489 return iemRaiseGeneralProtectionFault0(pVCpu);
2490
2491#ifndef IEM_WITH_CODE_TLB
2492 iemOpcodeFlushLight(pVCpu, cbInstr);
2493#endif
2494 return iemRegFinishNoFlags(pVCpu, rcNormal);
2495}
2496
2497
2498/**
2499 * Adds an 8-bit signed jump offset to IP, on a pre-386 CPU, no checking or
2500 * clearing of flags.
2501 *
2502 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2503 * segment limit.
2504 *
2505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2506 * @param cbInstr Instruction size.
2507 * @param offNextInstr The offset of the next instruction.
2508 * @param rcNormal VINF_SUCCESS to continue TB.
2509 * VINF_IEM_REEXEC_BREAK to force TB exit when
2510 * taking the wrong conditional branch.
2511 */
2512DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegIp16RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
2513 int8_t offNextInstr, int rcNormal) RT_NOEXCEPT
2514{
2515 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2516
2517 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
2518 if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
2519 pVCpu->cpum.GstCtx.rip = uNewIp;
2520 else
2521 return iemRaiseGeneralProtectionFault0(pVCpu);
2522
2523#ifndef IEM_WITH_CODE_TLB
2524 iemOpcodeFlushLight(pVCpu, cbInstr);
2525#endif
2526 return iemRegFinishNoFlags(pVCpu, rcNormal);
2527}
2528
2529
2530/**
2531 * Adds a 16-bit signed jump offset to RIP from 64-bit code.
2532 *
2533 * @returns Strict VBox status code.
2534 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2535 * @param cbInstr Instruction size.
2536 * @param offNextInstr The offset of the next instruction.
2537 * @param rcNormal VINF_SUCCESS to continue TB.
2538 * VINF_IEM_REEXEC_BREAK to force TB exit when
2539 * taking the wrong conditional branch.
2540 */
2541DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2542 int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
2543{
2544 Assert(IEM_IS_64BIT_CODE(pVCpu));
2545
2546 pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr);
2547
2548#ifndef IEM_WITH_CODE_TLB
2549 iemOpcodeFlushLight(pVCpu, cbInstr);
2550#endif
2551
2552 /*
2553 * Clear RF and finish the instruction (maybe raise #DB).
2554 */
2555 return iemRegFinishClearingRF(pVCpu, rcNormal);
2556}
2557
2558
2559/**
2560 * Adds a 16-bit signed jump offset to EIP from 16-bit or 32-bit code.
2561 *
2562 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2563 * segment limit.
2564 *
2565 * @returns Strict VBox status code.
2566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2567 * @param cbInstr Instruction size.
2568 * @param offNextInstr The offset of the next instruction.
2569 * @param rcNormal VINF_SUCCESS to continue TB.
2570 * VINF_IEM_REEXEC_BREAK to force TB exit when
2571 * taking the wrong conditional branch.
2572 *
2573 * @note This is also used by 16-bit code in pre-386 mode, as the code is
2574 * identical.
2575 */
2576DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2577 int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
2578{
2579 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2580
2581 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
2582 if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
2583 pVCpu->cpum.GstCtx.rip = uNewIp;
2584 else
2585 return iemRaiseGeneralProtectionFault0(pVCpu);
2586
2587#ifndef IEM_WITH_CODE_TLB
2588 iemOpcodeFlushLight(pVCpu, cbInstr);
2589#endif
2590
2591 /*
2592 * Clear RF and finish the instruction (maybe raise #DB).
2593 */
2594 return iemRegFinishClearingRF(pVCpu, rcNormal);
2595}
2596
2597
2598/**
2599 * Adds a 16-bit signed jump offset to RIP from 64-bit code, no checking or
2600 * clearing of flags.
2601 *
2602 * @returns Strict VBox status code.
2603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2604 * @param cbInstr Instruction size.
2605 * @param offNextInstr The offset of the next instruction.
2606 * @param rcNormal VINF_SUCCESS to continue TB.
2607 * VINF_IEM_REEXEC_BREAK to force TB exit when
2608 * taking the wrong conditional branch.
2609 */
2610DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
2611 int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
2612{
2613 Assert(IEM_IS_64BIT_CODE(pVCpu));
2614
2615 pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr);
2616
2617#ifndef IEM_WITH_CODE_TLB
2618 iemOpcodeFlushLight(pVCpu, cbInstr);
2619#endif
2620 return iemRegFinishNoFlags(pVCpu, rcNormal);
2621}
2622
2623
2624/**
2625 * Adds a 16-bit signed jump offset to EIP from 16-bit or 32-bit code,
2626 * no checking or clearing of flags.
2627 *
2628 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2629 * segment limit.
2630 *
2631 * @returns Strict VBox status code.
2632 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2633 * @param cbInstr Instruction size.
2634 * @param offNextInstr The offset of the next instruction.
2635 * @param rcNormal VINF_SUCCESS to continue TB.
2636 * VINF_IEM_REEXEC_BREAK to force TB exit when
2637 * taking the wrong conditional branch.
2638 *
2639 * @note This is also used by 16-bit code in pre-386 mode, as the code is
2640 * identical.
2641 */
2642DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
2643 int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
2644{
2645 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2646
2647 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
2648 if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
2649 pVCpu->cpum.GstCtx.rip = uNewIp;
2650 else
2651 return iemRaiseGeneralProtectionFault0(pVCpu);
2652
2653#ifndef IEM_WITH_CODE_TLB
2654 iemOpcodeFlushLight(pVCpu, cbInstr);
2655#endif
2656 return iemRegFinishNoFlags(pVCpu, rcNormal);
2657}
2658
2659
2660/**
2661 * Adds a 32-bit signed jump offset to RIP from 64-bit code.
2662 *
2663 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2664 * segment limit.
2665 *
2666 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
2667 * only alternative for relative jumps in 64-bit code and that is already
2668 * handled in the decoder stage.
2669 *
2670 * @returns Strict VBox status code.
2671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2672 * @param cbInstr Instruction size.
2673 * @param offNextInstr The offset of the next instruction.
2674 * @param rcNormal VINF_SUCCESS to continue TB.
2675 * VINF_IEM_REEXEC_BREAK to force TB exit when
2676 * taking the wrong conditional branch.
2677 */
2678DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2679 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
2680{
2681 Assert(IEM_IS_64BIT_CODE(pVCpu));
2682
2683 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2684 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2685 pVCpu->cpum.GstCtx.rip = uNewRip;
2686 else
2687 return iemRaiseGeneralProtectionFault0(pVCpu);
2688
2689#ifndef IEM_WITH_CODE_TLB
2690 iemOpcodeFlushLight(pVCpu, cbInstr);
2691#endif
2692
2693 /*
2694 * Clear RF and finish the instruction (maybe raise #DB).
2695 */
2696 return iemRegFinishClearingRF(pVCpu, rcNormal);
2697}
2698
2699
2700/**
2701 * Adds a 32-bit signed jump offset to EIP from 16-bit or 32-bit code.
2702 *
2703 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2704 * segment limit.
2705 *
2706 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
2707 * only alternative for relative jumps in 32-bit code and that is already
2708 * handled in the decoder stage.
2709 *
2710 * @returns Strict VBox status code.
2711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2712 * @param cbInstr Instruction size.
2713 * @param offNextInstr The offset of the next instruction.
2714 * @param rcNormal VINF_SUCCESS to continue TB.
2715 * VINF_IEM_REEXEC_BREAK to force TB exit when
2716 * taking the wrong conditional branch.
2717 */
2718DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2719 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
2720{
2721 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2722 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
2723
2724 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
2725 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2726 pVCpu->cpum.GstCtx.rip = uNewEip;
2727 else
2728 return iemRaiseGeneralProtectionFault0(pVCpu);
2729
2730#ifndef IEM_WITH_CODE_TLB
2731 iemOpcodeFlushLight(pVCpu, cbInstr);
2732#endif
2733
2734 /*
2735 * Clear RF and finish the instruction (maybe raise #DB).
2736 */
2737 return iemRegFinishClearingRF(pVCpu, rcNormal);
2738}
2739
2740
2741/**
2742 * Adds a 32-bit signed jump offset to RIP from 64-bit code, no checking or
2743 * clearing of flags.
2744 *
2745 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2746 * segment limit.
2747 *
2748 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
2749 * only alternative for relative jumps in 64-bit code and that is already
2750 * handled in the decoder stage.
2751 *
2752 * @returns Strict VBox status code.
2753 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2754 * @param cbInstr Instruction size.
2755 * @param offNextInstr The offset of the next instruction.
2756 * @param rcNormal VINF_SUCCESS to continue TB.
2757 * VINF_IEM_REEXEC_BREAK to force TB exit when
2758 * taking the wrong conditional branch.
2759 */
2760DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
2761 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
2762{
2763 Assert(IEM_IS_64BIT_CODE(pVCpu));
2764
2765 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2766 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2767 pVCpu->cpum.GstCtx.rip = uNewRip;
2768 else
2769 return iemRaiseGeneralProtectionFault0(pVCpu);
2770
2771#ifndef IEM_WITH_CODE_TLB
2772 iemOpcodeFlushLight(pVCpu, cbInstr);
2773#endif
2774 return iemRegFinishNoFlags(pVCpu, rcNormal);
2775}
2776
2777
2778/**
2779 * Adds a 32-bit signed jump offset to EIP from 16-bit or 32-bit code,
2780 * no checking or clearing of flags.
2781 *
2782 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2783 * segment limit.
2784 *
2785 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
2786 * only alternative for relative jumps in 32-bit code and that is already
2787 * handled in the decoder stage.
2788 *
2789 * @returns Strict VBox status code.
2790 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2791 * @param cbInstr Instruction size.
2792 * @param offNextInstr The offset of the next instruction.
2793 * @param rcNormal VINF_SUCCESS to continue TB.
2794 * VINF_IEM_REEXEC_BREAK to force TB exit when
2795 * taking the wrong conditional branch.
2796 */
2797DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
2798 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
2799{
2800 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2801 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
2802
2803 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
2804 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2805 pVCpu->cpum.GstCtx.rip = uNewEip;
2806 else
2807 return iemRaiseGeneralProtectionFault0(pVCpu);
2808
2809#ifndef IEM_WITH_CODE_TLB
2810 iemOpcodeFlushLight(pVCpu, cbInstr);
2811#endif
2812 return iemRegFinishNoFlags(pVCpu, rcNormal);
2813}
2814
2815
2816/**
2817 * Extended version of iemFinishInstructionWithFlagsSet that goes with
2818 * iemRegAddToRipAndFinishingClearingRfEx.
2819 *
2820 * See iemFinishInstructionWithFlagsSet() for details.
2821 */
2822static VBOXSTRICTRC iemFinishInstructionWithTfSet(PVMCPUCC pVCpu) RT_NOEXCEPT
2823{
2824 /*
2825 * Raise a #DB.
2826 */
2827 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2828 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
2829 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS
2830 | (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK) >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2831 /** @todo Do we set all pending \#DB events, or just one? */
2832 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64 (popf)\n",
2833 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
2834 pVCpu->cpum.GstCtx.rflags.uBoth));
2835 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
2836 return iemRaiseDebugException(pVCpu);
2837}
2838
2839
2840/**
2841 * Extended version of iemRegAddToRipAndFinishingClearingRF for use by POPF and
2842 * others potentially updating EFLAGS.TF.
2843 *
2844 * The single step event must be generated using the TF value at the start of
2845 * the instruction, not the new value set by it.
2846 *
2847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2848 * @param cbInstr The number of bytes to add.
2849 * @param fEflOld The EFLAGS at the start of the instruction
2850 * execution.
2851 */
2852DECLINLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRfEx(PVMCPUCC pVCpu, uint8_t cbInstr, uint32_t fEflOld) RT_NOEXCEPT
2853{
2854 iemRegAddToRip(pVCpu, cbInstr);
2855 if (!(fEflOld & X86_EFL_TF))
2856 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2857 return iemFinishInstructionWithTfSet(pVCpu);
2858}
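
/*
 * An illustrative usage sketch for the RfEx variant above: an instruction such
 * as POPF must capture EFLAGS before modifying them, so that the single-step
 * decision is made on the old TF value. The surrounding caller code is an
 * assumption; only the helpers named here are real.
 */
#if 0 /* illustrative sketch, not compiled */
    uint32_t const fEflOld = IEMMISC_GET_EFL(pVCpu);    /* TF as it was before this instruction. */
    /* ... pop the new value off the stack and commit it with IEMMISC_SET_EFL ... */
    return iemRegAddToRipAndFinishingClearingRfEx(pVCpu, cbInstr, fEflOld);
#endif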
2859
2860
2861#ifndef IEM_WITH_OPAQUE_DECODER_STATE
2862/**
2863 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
2864 *
2865 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2866 */
2867DECLINLINE(VBOXSTRICTRC) iemRegUpdateRipAndFinishClearingRF(PVMCPUCC pVCpu) RT_NOEXCEPT
2868{
2869 return iemRegAddToRipAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
2870}
2871#endif
2872
2873
2874#ifdef IEM_WITH_CODE_TLB
2875
2876/**
2877 * Performs a near jump to the specified address, no checking or clearing of
2878 * flags.
2879 *
2880 * May raise a \#GP(0) if the new IP is outside the code segment limit.
2881 *
2882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2883 * @param uNewIp The new IP value.
2884 */
2885DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU16AndFinishNoFlags(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
2886{
2887 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
2888 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
2889 pVCpu->cpum.GstCtx.rip = uNewIp;
2890 else
2891 return iemRaiseGeneralProtectionFault0(pVCpu);
2892 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
2893}
2894
2895
2896/**
2897 * Performs a near jump to the specified address, no checking or clearing of
2898 * flags.
2899 *
2900 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
2901 *
2902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2903 * @param uNewEip The new EIP value.
2904 */
2905DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU32AndFinishNoFlags(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
2906{
2907 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
2908 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2909 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2910 pVCpu->cpum.GstCtx.rip = uNewEip;
2911 else
2912 return iemRaiseGeneralProtectionFault0(pVCpu);
2913 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
2914}
2915
2916
2917/**
2918 * Performs a near jump to the specified address, no checking or clearing of
2919 * flags.
2920 *
2921 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2922 * segment limit.
2923 *
2924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2925 * @param uNewRip The new RIP value.
2926 */
2927DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU64AndFinishNoFlags(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
2928{
2929 Assert(IEM_IS_64BIT_CODE(pVCpu));
2930 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2931 pVCpu->cpum.GstCtx.rip = uNewRip;
2932 else
2933 return iemRaiseGeneralProtectionFault0(pVCpu);
2934 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
2935}
2936
2937#endif /* IEM_WITH_CODE_TLB */
2938
2939/**
2940 * Performs a near jump to the specified address.
2941 *
2942 * May raise a \#GP(0) if the new IP is outside the code segment limit.
2943 *
2944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2945 * @param uNewIp The new IP value.
2946 * @param cbInstr The instruction length, for flushing in the non-TLB case.
2947 */
2948DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU16AndFinishClearingRF(PVMCPUCC pVCpu, uint16_t uNewIp, uint8_t cbInstr) RT_NOEXCEPT
2949{
2950 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
2951 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
2952 pVCpu->cpum.GstCtx.rip = uNewIp;
2953 else
2954 return iemRaiseGeneralProtectionFault0(pVCpu);
2955#ifndef IEM_WITH_CODE_TLB
2956 iemOpcodeFlushLight(pVCpu, cbInstr);
2957#else
2958 RT_NOREF_PV(cbInstr);
2959#endif
2960 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2961}
2962
2963
2964/**
2965 * Performs a near jump to the specified address.
2966 *
2967 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
2968 *
2969 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2970 * @param uNewEip The new EIP value.
2971 * @param cbInstr The instruction length, for flushing in the non-TLB case.
2972 */
2973DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU32AndFinishClearingRF(PVMCPUCC pVCpu, uint32_t uNewEip, uint8_t cbInstr) RT_NOEXCEPT
2974{
2975 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
2976 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2977 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2978 pVCpu->cpum.GstCtx.rip = uNewEip;
2979 else
2980 return iemRaiseGeneralProtectionFault0(pVCpu);
2981#ifndef IEM_WITH_CODE_TLB
2982 iemOpcodeFlushLight(pVCpu, cbInstr);
2983#else
2984 RT_NOREF_PV(cbInstr);
2985#endif
2986 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2987}
2988
2989
2990/**
2991 * Performs a near jump to the specified address.
2992 *
2993 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2994 * segment limit.
2995 *
2996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2997 * @param uNewRip The new RIP value.
2998 * @param cbInstr The instruction length, for flushing in the non-TLB case.
2999 */
3000DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU64AndFinishClearingRF(PVMCPUCC pVCpu, uint64_t uNewRip, uint8_t cbInstr) RT_NOEXCEPT
3001{
3002 Assert(IEM_IS_64BIT_CODE(pVCpu));
3003 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
3004 pVCpu->cpum.GstCtx.rip = uNewRip;
3005 else
3006 return iemRaiseGeneralProtectionFault0(pVCpu);
3007#ifndef IEM_WITH_CODE_TLB
3008 iemOpcodeFlushLight(pVCpu, cbInstr);
3009#else
3010 RT_NOREF_PV(cbInstr);
3011#endif
3012 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
3013}
3014
3015
3016/**
3017 * Implements a 16-bit relative call, no checking or clearing of
3018 * flags.
3019 *
3020 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3021 * @param cbInstr The instruction length.
3022 * @param offDisp The 16-bit displacement.
3023 */
3024DECL_FORCE_INLINE(VBOXSTRICTRC)
3025iemRegRipRelativeCallS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offDisp) RT_NOEXCEPT
3026{
3027 uint16_t const uOldIp = pVCpu->cpum.GstCtx.ip + cbInstr;
3028 uint16_t const uNewIp = uOldIp + offDisp;
3029 if ( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
3030 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */)
3031 { /* likely */ }
3032 else
3033 return iemRaiseGeneralProtectionFault0(pVCpu);
3034
3035 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldIp);
3036 if (rcStrict == VINF_SUCCESS)
3037 { /* likely */ }
3038 else
3039 return rcStrict;
3040
3041 pVCpu->cpum.GstCtx.rip = uNewIp;
3042#ifndef IEM_WITH_CODE_TLB
3043 iemOpcodeFlushLight(pVCpu, cbInstr);
3044#endif
3045 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
3046}
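
/*
 * A standalone sketch of the ordering shared by the relative call helpers:
 * validate the target first, push the return address next, and only then
 * commit the new IP, so a faulting stack push leaves the guest RIP unchanged.
 * All names below are illustrative assumptions.
 */
#if 0 /* illustrative sketch, not compiled */
    uint16_t const uRetIp = (uint16_t)(uIpStart + cbInstr);     /* return address */
    uint16_t const uNewIp = (uint16_t)(uRetIp + offDisp);       /* target, with 16-bit wrap */
    if (uNewIp > uCsLimit)
        return EX_ERR_GP0;                       /* #GP(0): target outside the code segment */
    int const rc = exStackPushU16(uRetIp);       /* may fault; nothing committed yet */
    if (rc != EX_OK)
        return rc;
    uIp = uNewIp;                                /* commit the branch last */
    return EX_OK;
#endif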
3047
3048
3049/**
3050 * Implements a 16-bit relative call.
3051 *
3052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3053 * @param cbInstr The instruction length.
3054 * @param offDisp The 16-bit displacement.
3055 */
3056DECL_FORCE_INLINE(VBOXSTRICTRC)
3057iemRegRipRelativeCallS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offDisp) RT_NOEXCEPT
3058{
3059 uint16_t const uOldIp = pVCpu->cpum.GstCtx.ip + cbInstr;
3060 uint16_t const uNewIp = uOldIp + offDisp;
3061 if ( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
3062 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */)
3063 { /* likely */ }
3064 else
3065 return iemRaiseGeneralProtectionFault0(pVCpu);
3066
3067 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldIp);
3068 if (rcStrict == VINF_SUCCESS)
3069 { /* likely */ }
3070 else
3071 return rcStrict;
3072
3073 pVCpu->cpum.GstCtx.rip = uNewIp;
3074#ifndef IEM_WITH_CODE_TLB
3075 iemOpcodeFlushLight(pVCpu, cbInstr);
3076#endif
3077 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
3078}
3079
3080
3081/**
3082 * Implements a 32-bit relative call, no checking or clearing of flags.
3083 *
3084 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3085 * @param cbInstr The instruction length.
3086 * @param offDisp The 32-bit displacement.
3087 */
3088DECL_FORCE_INLINE(VBOXSTRICTRC)
3089iemRegEip32RelativeCallS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offDisp) RT_NOEXCEPT
3090{
3091 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
3092
3093 uint32_t const uOldRip = pVCpu->cpum.GstCtx.eip + cbInstr;
3094 uint32_t const uNewRip = uOldRip + offDisp;
3095 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
3096 { /* likely */ }
3097 else
3098 return iemRaiseGeneralProtectionFault0(pVCpu);
3099
3100 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldRip);
3101 if (rcStrict == VINF_SUCCESS)
3102 { /* likely */ }
3103 else
3104 return rcStrict;
3105
3106 pVCpu->cpum.GstCtx.rip = uNewRip;
3107#ifndef IEM_WITH_CODE_TLB
3108 iemOpcodeFlushLight(pVCpu, cbInstr);
3109#endif
3110 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
3111}
3112
3113
3114/**
3115 * Implements a 32-bit relative call.
3116 *
3117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3118 * @param cbInstr The instruction length.
3119 * @param offDisp The 32-bit displacement.
3120 */
3121DECL_FORCE_INLINE(VBOXSTRICTRC)
3122iemRegEip32RelativeCallS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offDisp) RT_NOEXCEPT
3123{
3124 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
3125
3126 uint32_t const uOldRip = pVCpu->cpum.GstCtx.eip + cbInstr;
3127 uint32_t const uNewRip = uOldRip + offDisp;
3128 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
3129 { /* likely */ }
3130 else
3131 return iemRaiseGeneralProtectionFault0(pVCpu);
3132
3133 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldRip);
3134 if (rcStrict == VINF_SUCCESS)
3135 { /* likely */ }
3136 else
3137 return rcStrict;
3138
3139 pVCpu->cpum.GstCtx.rip = uNewRip;
3140#ifndef IEM_WITH_CODE_TLB
3141 iemOpcodeFlushLight(pVCpu, cbInstr);
3142#endif
3143 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
3144}
3145
3146
3147/**
3148 * Implements a 64-bit relative call, no checking or clearing of flags.
3149 *
3150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3151 * @param cbInstr The instruction length.
3152 * @param offDisp The 64-bit displacement.
3153 */
3154DECL_FORCE_INLINE(VBOXSTRICTRC)
3155iemRegRip64RelativeCallS64AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int64_t offDisp) RT_NOEXCEPT
3156{
3157 uint64_t const uOldRip = pVCpu->cpum.GstCtx.rip + cbInstr;
3158 uint64_t const uNewRip = uOldRip + (int64_t)offDisp;
3159 if (IEM_IS_CANONICAL(uNewRip))
3160 { /* likely */ }
3161 else
3162 return iemRaiseNotCanonical(pVCpu);
3163
3164 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldRip);
3165 if (rcStrict == VINF_SUCCESS)
3166 { /* likely */ }
3167 else
3168 return rcStrict;
3169
3170 pVCpu->cpum.GstCtx.rip = uNewRip;
3171#ifndef IEM_WITH_CODE_TLB
3172 iemOpcodeFlushLight(pVCpu, cbInstr);
3173#endif
3174 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
3175}
3176
3177
3178/**
3179 * Implements a 64-bit relative call.
3180 *
3181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3182 * @param cbInstr The instruction length.
3183 * @param offDisp The 64-bit displacement.
3184 */
3185DECL_FORCE_INLINE(VBOXSTRICTRC)
3186iemRegRip64RelativeCallS64AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int64_t offDisp) RT_NOEXCEPT
3187{
3188 uint64_t const uOldRip = pVCpu->cpum.GstCtx.rip + cbInstr;
3189 uint64_t const uNewRip = uOldRip + (int64_t)offDisp;
3190 if (IEM_IS_CANONICAL(uNewRip))
3191 { /* likely */ }
3192 else
3193 return iemRaiseNotCanonical(pVCpu);
3194
3195 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldRip);
3196 if (rcStrict == VINF_SUCCESS)
3197 { /* likely */ }
3198 else
3199 return rcStrict;
3200
3201 pVCpu->cpum.GstCtx.rip = uNewRip;
3202#ifndef IEM_WITH_CODE_TLB
3203 iemOpcodeFlushLight(pVCpu, cbInstr);
3204#endif
3205 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
3206}
3207
3208
3209/**
3210 * Implements a 16-bit indirect call, no checking or clearing of
3211 * flags.
3212 *
3213 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3214 * @param cbInstr The instruction length.
3215 * @param uNewRip The new RIP value.
3216 */
3217DECL_FORCE_INLINE(VBOXSTRICTRC)
3218iemRegIp16IndirectCallU16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uNewRip) RT_NOEXCEPT
3219{
3220 uint16_t const uOldRip = pVCpu->cpum.GstCtx.ip + cbInstr;
3221 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
3222 { /* likely */ }
3223 else
3224 return iemRaiseGeneralProtectionFault0(pVCpu);
3225
3226 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldRip);
3227 if (rcStrict == VINF_SUCCESS)
3228 { /* likely */ }
3229 else
3230 return rcStrict;
3231
3232 pVCpu->cpum.GstCtx.rip = uNewRip;
3233#ifndef IEM_WITH_CODE_TLB
3234 iemOpcodeFlushLight(pVCpu, cbInstr);
3235#endif
3236 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
3237}
3238
3239
3240/**
3241 * Implements a 16-bit indirect call, no checking or clearing of
3242 * flags.
3243 *
3244 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3245 * @param cbInstr The instruction length.
3246 * @param uNewRip The new RIP value.
3247 */
3248DECL_FORCE_INLINE(VBOXSTRICTRC)
3249iemRegEip32IndirectCallU16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uNewRip) RT_NOEXCEPT
3250{
3251 uint16_t const uOldRip = pVCpu->cpum.GstCtx.ip + cbInstr;
3252 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
3253 { /* likely */ }
3254 else
3255 return iemRaiseGeneralProtectionFault0(pVCpu);
3256
3257 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldRip);
3258 if (rcStrict == VINF_SUCCESS)
3259 { /* likely */ }
3260 else
3261 return rcStrict;
3262
3263 pVCpu->cpum.GstCtx.rip = uNewRip;
3264#ifndef IEM_WITH_CODE_TLB
3265 iemOpcodeFlushLight(pVCpu, cbInstr);
3266#endif
3267 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
3268}
3269
3270
3271/**
3272 * Implements a 16-bit indirect call.
3273 *
3274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3275 * @param cbInstr The instruction length.
3276 * @param uNewRip The new RIP value.
3277 */
3278DECL_FORCE_INLINE(VBOXSTRICTRC)
3279iemRegIp16IndirectCallU16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uNewRip) RT_NOEXCEPT
3280{
3281 uint16_t const uOldRip = pVCpu->cpum.GstCtx.ip + cbInstr;
3282 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
3283 { /* likely */ }
3284 else
3285 return iemRaiseGeneralProtectionFault0(pVCpu);
3286
3287 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldRip);
3288 if (rcStrict == VINF_SUCCESS)
3289 { /* likely */ }
3290 else
3291 return rcStrict;
3292
3293 pVCpu->cpum.GstCtx.rip = uNewRip;
3294#ifndef IEM_WITH_CODE_TLB
3295 iemOpcodeFlushLight(pVCpu, cbInstr);
3296#endif
3297 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
3298}
3299
3300
3301/**
3302 * Implements a 16-bit indirect call.
3303 *
3304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3305 * @param cbInstr The instruction length.
3306 * @param uNewRip The new RIP value.
3307 */
3308DECL_FORCE_INLINE(VBOXSTRICTRC)
3309iemRegEip32IndirectCallU16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uNewRip) RT_NOEXCEPT
3310{
3311 uint16_t const uOldRip = pVCpu->cpum.GstCtx.ip + cbInstr;
3312 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
3313 { /* likely */ }
3314 else
3315 return iemRaiseGeneralProtectionFault0(pVCpu);
3316
3317 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldRip);
3318 if (rcStrict == VINF_SUCCESS)
3319 { /* likely */ }
3320 else
3321 return rcStrict;
3322
3323 pVCpu->cpum.GstCtx.rip = uNewRip;
3324#ifndef IEM_WITH_CODE_TLB
3325 iemOpcodeFlushLight(pVCpu, cbInstr);
3326#endif
3327 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
3328}
3329
3330
3331/**
3332 * Implements a 32-bit indirect call, no checking or clearing of
3333 * flags.
3334 *
3335 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3336 * @param cbInstr The instruction length.
3337 * @param uNewRip The new RIP value.
3338 */
3339DECL_FORCE_INLINE(VBOXSTRICTRC)
3340iemRegEip32IndirectCallU32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, uint32_t uNewRip) RT_NOEXCEPT
3341{
3342 uint32_t const uOldRip = pVCpu->cpum.GstCtx.eip + cbInstr;
3343 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
3344 { /* likely */ }
3345 else
3346 return iemRaiseGeneralProtectionFault0(pVCpu);
3347
3348 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldRip);
3349 if (rcStrict == VINF_SUCCESS)
3350 { /* likely */ }
3351 else
3352 return rcStrict;
3353
3354 pVCpu->cpum.GstCtx.rip = uNewRip;
3355#ifndef IEM_WITH_CODE_TLB
3356 iemOpcodeFlushLight(pVCpu, cbInstr);
3357#endif
3358 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
3359}
3360
3361
3362/**
3363 * Implements a 32-bit indirect call.
3364 *
3365 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3366 * @param cbInstr The instruction length.
3367 * @param uNewRip The new RIP value.
3368 */
3369DECL_FORCE_INLINE(VBOXSTRICTRC)
3370iemRegEip32IndirectCallU32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, uint32_t uNewRip) RT_NOEXCEPT
3371{
3372 uint32_t const uOldRip = pVCpu->cpum.GstCtx.eip + cbInstr;
3373 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
3374 { /* likely */ }
3375 else
3376 return iemRaiseGeneralProtectionFault0(pVCpu);
3377
3378 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldRip);
3379 if (rcStrict == VINF_SUCCESS)
3380 { /* likely */ }
3381 else
3382 return rcStrict;
3383
3384 pVCpu->cpum.GstCtx.rip = uNewRip;
3385#ifndef IEM_WITH_CODE_TLB
3386 iemOpcodeFlushLight(pVCpu, cbInstr);
3387#endif
3388 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
3389}
3390
3391
3392/**
3393 * Implements a 64-bit indirect call, no checking or clearing of
3394 * flags.
3395 *
3396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3397 * @param cbInstr The instruction length.
3398 * @param uNewRip The new RIP value.
3399 */
3400DECL_FORCE_INLINE(VBOXSTRICTRC)
3401iemRegRip64IndirectCallU64AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, uint64_t uNewRip) RT_NOEXCEPT
3402{
3403 uint64_t const uOldRip = pVCpu->cpum.GstCtx.rip + cbInstr;
3404 if (IEM_IS_CANONICAL(uNewRip))
3405 { /* likely */ }
3406 else
3407 return iemRaiseGeneralProtectionFault0(pVCpu);
3408
3409 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldRip);
3410 if (rcStrict == VINF_SUCCESS)
3411 { /* likely */ }
3412 else
3413 return rcStrict;
3414
3415 pVCpu->cpum.GstCtx.rip = uNewRip;
3416#ifndef IEM_WITH_CODE_TLB
3417 iemOpcodeFlushLight(pVCpu, cbInstr);
3418#endif
3419 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
3420}
3421
3422
3423/**
3424 * Implements a 64-bit indirect call.
3425 *
3426 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3427 * @param cbInstr The instruction length.
3428 * @param uNewRip The new RIP value.
3429 */
3430DECL_FORCE_INLINE(VBOXSTRICTRC)
3431iemRegRip64IndirectCallU64AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, uint64_t uNewRip) RT_NOEXCEPT
3432{
3433 uint64_t const uOldRip = pVCpu->cpum.GstCtx.rip + cbInstr;
3434 if (IEM_IS_CANONICAL(uNewRip))
3435 { /* likely */ }
3436 else
3437 return iemRaiseGeneralProtectionFault0(pVCpu);
3438
3439 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldRip);
3440 if (rcStrict == VINF_SUCCESS)
3441 { /* likely */ }
3442 else
3443 return rcStrict;
3444
3445 pVCpu->cpum.GstCtx.rip = uNewRip;
3446#ifndef IEM_WITH_CODE_TLB
3447 iemOpcodeFlushLight(pVCpu, cbInstr);
3448#endif
3449 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
3450}
3451
3452
3453
3454/**
3455 * Adds to the stack pointer.
3456 *
3457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3458 * @param cbToAdd The number of bytes to add (8-bit!).
3459 */
3460DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd) RT_NOEXCEPT
3461{
3462 if (IEM_IS_64BIT_CODE(pVCpu))
3463 pVCpu->cpum.GstCtx.rsp += cbToAdd;
3464 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3465 pVCpu->cpum.GstCtx.esp += cbToAdd;
3466 else
3467 pVCpu->cpum.GstCtx.sp += cbToAdd;
3468}
3469
3470
3471/**
3472 * Subtracts from the stack pointer.
3473 *
3474 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3475 * @param cbToSub The number of bytes to subtract (8-bit!).
3476 */
3477DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub) RT_NOEXCEPT
3478{
3479 if (IEM_IS_64BIT_CODE(pVCpu))
3480 pVCpu->cpum.GstCtx.rsp -= cbToSub;
3481 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3482 pVCpu->cpum.GstCtx.esp -= cbToSub;
3483 else
3484 pVCpu->cpum.GstCtx.sp -= cbToSub;
3485}
3486
3487
3488/**
3489 * Adds to the temporary stack pointer.
3490 *
3491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3492 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3493 * @param cbToAdd The number of bytes to add (16-bit).
3494 */
3495DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd) RT_NOEXCEPT
3496{
3497 if (IEM_IS_64BIT_CODE(pVCpu))
3498 pTmpRsp->u += cbToAdd;
3499 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3500 pTmpRsp->DWords.dw0 += cbToAdd;
3501 else
3502 pTmpRsp->Words.w0 += cbToAdd;
3503}
3504
3505
3506/**
3507 * Subtracts from the temporary stack pointer.
3508 *
3509 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3510 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3511 * @param cbToSub The number of bytes to subtract.
3512 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
3513 * expecting that.
3514 */
3515DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub) RT_NOEXCEPT
3516{
3517 if (IEM_IS_64BIT_CODE(pVCpu))
3518 pTmpRsp->u -= cbToSub;
3519 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3520 pTmpRsp->DWords.dw0 -= cbToSub;
3521 else
3522 pTmpRsp->Words.w0 -= cbToSub;
3523}
3524
3525
3526/**
3527 * Calculates the effective stack address for a push of the specified size as
3528 * well as the new RSP value (upper bits may be masked).
3529 *
3530 * @returns Effective stack address for the push.
3531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3532 * @param cbItem The size of the stack item to push.
3533 * @param puNewRsp Where to return the new RSP value.
3534 */
3535DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
3536{
3537 RTUINT64U uTmpRsp;
3538 RTGCPTR GCPtrTop;
3539 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
3540
3541 if (IEM_IS_64BIT_CODE(pVCpu))
3542 GCPtrTop = uTmpRsp.u -= cbItem;
3543 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3544 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
3545 else
3546 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
3547 *puNewRsp = uTmpRsp.u;
3548 return GCPtrTop;
3549}
3550
3551
3552/**
3553 * Gets the current stack pointer and calculates the value after a pop of the
3554 * specified size.
3555 *
3556 * @returns Current stack pointer.
3557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3558 * @param cbItem The size of the stack item to pop.
3559 * @param puNewRsp Where to return the new RSP value.
3560 */
3561DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
3562{
3563 RTUINT64U uTmpRsp;
3564 RTGCPTR GCPtrTop;
3565 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
3566
3567 if (IEM_IS_64BIT_CODE(pVCpu))
3568 {
3569 GCPtrTop = uTmpRsp.u;
3570 uTmpRsp.u += cbItem;
3571 }
3572 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3573 {
3574 GCPtrTop = uTmpRsp.DWords.dw0;
3575 uTmpRsp.DWords.dw0 += cbItem;
3576 }
3577 else
3578 {
3579 GCPtrTop = uTmpRsp.Words.w0;
3580 uTmpRsp.Words.w0 += cbItem;
3581 }
3582 *puNewRsp = uTmpRsp.u;
3583 return GCPtrTop;
3584}
3585
3586
3587/**
3588 * Calculates the effective stack address for a push of the specified size as
3589 * well as the new temporary RSP value (upper bits may be masked).
3590 *
3591 * @returns Effective stack address for the push.
3592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3593 * @param pTmpRsp The temporary stack pointer. This is updated.
3594 * @param cbItem The size of the stack item to push.
3595 */
3596DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
3597{
3598 RTGCPTR GCPtrTop;
3599
3600 if (IEM_IS_64BIT_CODE(pVCpu))
3601 GCPtrTop = pTmpRsp->u -= cbItem;
3602 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3603 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
3604 else
3605 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
3606 return GCPtrTop;
3607}
3608
3609
3610/**
3611 * Gets the effective stack address for a pop of the specified size and
3612 * calculates and updates the temporary RSP.
3613 *
3614 * @returns Current stack pointer.
3615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3616 * @param pTmpRsp The temporary stack pointer. This is updated.
3617 * @param cbItem The size of the stack item to pop.
3618 */
3619DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
3620{
3621 RTGCPTR GCPtrTop;
3622 if (IEM_IS_64BIT_CODE(pVCpu))
3623 {
3624 GCPtrTop = pTmpRsp->u;
3625 pTmpRsp->u += cbItem;
3626 }
3627 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3628 {
3629 GCPtrTop = pTmpRsp->DWords.dw0;
3630 pTmpRsp->DWords.dw0 += cbItem;
3631 }
3632 else
3633 {
3634 GCPtrTop = pTmpRsp->Words.w0;
3635 pTmpRsp->Words.w0 += cbItem;
3636 }
3637 return GCPtrTop;
3638}
3639
3640
3641/** Common body for iemRegRipNearReturnAndFinishClearingRF()
3642 * and iemRegRipNearReturnAndFinishNoFlags(). */
3643template<bool a_fWithFlags>
3644DECL_FORCE_INLINE(VBOXSTRICTRC)
3645iemRegRipNearReturnCommon(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t cbPop, IEMMODE enmEffOpSize) RT_NOEXCEPT
3646{
3647 /* Fetch the new RIP from the stack. */
3648 VBOXSTRICTRC rcStrict;
3649 RTUINT64U NewRip;
3650 RTUINT64U NewRsp;
3651 NewRsp.u = pVCpu->cpum.GstCtx.rsp;
3652 switch (enmEffOpSize)
3653 {
3654 case IEMMODE_16BIT:
3655 NewRip.u = 0;
3656 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRip.Words.w0, &NewRsp);
3657 break;
3658 case IEMMODE_32BIT:
3659 NewRip.u = 0;
3660 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRip.DWords.dw0, &NewRsp);
3661 break;
3662 case IEMMODE_64BIT:
3663 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRip.u, &NewRsp);
3664 break;
3665 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3666 }
3667 if (rcStrict != VINF_SUCCESS)
3668 return rcStrict;
3669
3670 /* Check the new RIP before loading it. */
3671 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
3672 * of it. The canonical test is performed here and for call. */
3673 if (enmEffOpSize != IEMMODE_64BIT)
3674 {
3675 if (RT_LIKELY(NewRip.DWords.dw0 <= pVCpu->cpum.GstCtx.cs.u32Limit))
3676 { /* likely */ }
3677 else
3678 {
3679 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pVCpu->cpum.GstCtx.cs.u32Limit));
3680 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
3681 }
3682 }
3683 else
3684 {
3685 if (RT_LIKELY(IEM_IS_CANONICAL(NewRip.u)))
3686 { /* likely */ }
3687 else
3688 {
3689 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
3690 return iemRaiseNotCanonical(pVCpu);
3691 }
3692 }
3693
3694 /* Apply cbPop */
3695 if (cbPop)
3696 iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);
3697
3698 /* Commit it. */
3699 pVCpu->cpum.GstCtx.rip = NewRip.u;
3700 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
3701
3702 /* Flush the prefetch buffer. */
3703#ifndef IEM_WITH_CODE_TLB
3704 iemOpcodeFlushLight(pVCpu, cbInstr);
3705#endif
3706 RT_NOREF(cbInstr);
3707
3708
3709 if (a_fWithFlags)
3710 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
3711 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
3712}
3713
3714
3715/**
3716 * Implements retn and retn imm16.
3717 *
3718 * @param pVCpu The cross context virtual CPU structure of the
3719 * calling thread.
3720 * @param cbInstr The current instruction length.
3721 * @param enmEffOpSize The effective operand size. This is constant.
3722 * @param cbPop The number of argument bytes to pop from the stack.
3723 * This can be constant (zero).
3724 */
3725DECL_FORCE_INLINE(VBOXSTRICTRC)
3726iemRegRipNearReturnAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t cbPop, IEMMODE enmEffOpSize) RT_NOEXCEPT
3727{
3728 return iemRegRipNearReturnCommon<true /*a_fWithFlags*/>(pVCpu, cbInstr, cbPop, enmEffOpSize);
3729}
3730
3731
3732/**
3733 * Implements retn and retn imm16, no checking or clearing of
3734 * flags.
3735 *
3736 * @param pVCpu The cross context virtual CPU structure of the
3737 * calling thread.
3738 * @param cbInstr The current instruction length.
3739 * @param enmEffOpSize The effective operand size. This is constant.
3740 * @param cbPop The number of argument bytes to pop from the stack.
3741 * This can be constant (zero).
3742 */
3743DECL_FORCE_INLINE(VBOXSTRICTRC)
3744iemRegRipNearReturnAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t cbPop, IEMMODE enmEffOpSize) RT_NOEXCEPT
3745{
3746 return iemRegRipNearReturnCommon<false /*a_fWithFlags*/>(pVCpu, cbInstr, cbPop, enmEffOpSize);
3747}
3748
3749/** @} */
3750
3751
3752/** @name FPU access and helpers.
3753 *
3754 * @{
3755 */
3756
3757
3758/**
3759 * Hook for preparing to use the host FPU.
3760 *
3761 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3762 *
3763 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3764 */
3765DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu) RT_NOEXCEPT
3766{
3767#ifdef IN_RING3
3768 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
3769#else
3770 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
3771#endif
3772 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
3773}
3774
3775
3776/**
3777 * Hook for preparing to use the host FPU for SSE.
3778 *
3779 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3780 *
3781 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3782 */
3783DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu) RT_NOEXCEPT
3784{
3785 iemFpuPrepareUsage(pVCpu);
3786}
3787
3788
3789/**
3790 * Hook for preparing to use the host FPU for AVX.
3791 *
3792 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3793 *
3794 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3795 */
3796DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu) RT_NOEXCEPT
3797{
3798 iemFpuPrepareUsage(pVCpu);
3799}
3800
3801
3802/**
3803 * Hook for actualizing the guest FPU state before the interpreter reads it.
3804 *
3805 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3806 *
3807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3808 */
3809DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
3810{
3811#ifdef IN_RING3
3812 NOREF(pVCpu);
3813#else
3814 CPUMRZFpuStateActualizeForRead(pVCpu);
3815#endif
3816 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
3817}
3818
3819
3820/**
3821 * Hook for actualizing the guest FPU state before the interpreter changes it.
3822 *
3823 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3824 *
3825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3826 */
3827DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
3828{
3829#ifdef IN_RING3
3830 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
3831#else
3832 CPUMRZFpuStateActualizeForChange(pVCpu);
3833#endif
3834 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
3835}
3836
3837
3838/**
3839 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
3840 * only.
3841 *
3842 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3843 *
3844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3845 */
3846DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
3847{
3848#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
3849 NOREF(pVCpu);
3850#else
3851 CPUMRZFpuStateActualizeSseForRead(pVCpu);
3852#endif
3853 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
3854}
3855
3856
3857/**
3858 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
3859 * read+write.
3860 *
3861 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3862 *
3863 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3864 */
3865DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
3866{
3867#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
3868 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
3869#else
3870 CPUMRZFpuStateActualizeForChange(pVCpu);
3871#endif
3872 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
3873
3874 /* Make sure any changes are loaded the next time around. */
3875 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_SSE;
3876}
3877
3878
3879/**
3880 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
3881 * only.
3882 *
3883 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3884 *
3885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3886 */
3887DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
3888{
3889#ifdef IN_RING3
3890 NOREF(pVCpu);
3891#else
3892 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
3893#endif
3894 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
3895}
3896
3897
3898/**
3899 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
3900 * read+write.
3901 *
3902 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3903 *
3904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3905 */
3906DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
3907{
3908#ifdef IN_RING3
3909 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
3910#else
3911 CPUMRZFpuStateActualizeForChange(pVCpu);
3912#endif
3913 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
3914
3915 /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
3916 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
3917}
3918
3919
3920/**
3921 * Stores a QNaN value into an FPU register.
3922 *
3923 * @param pReg Pointer to the register.
3924 */
3925DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg) RT_NOEXCEPT
3926{
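 /* Sign=1, exponent=0x7fff, mantissa=0xC000000000000000 - the x87 QNaN "indefinite" encoding. */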
3927 pReg->au32[0] = UINT32_C(0x00000000);
3928 pReg->au32[1] = UINT32_C(0xc0000000);
3929 pReg->au16[4] = UINT16_C(0xffff);
3930}
3931
3932
3933/**
3934 * Updates the FOP, FPU.CS and FPUIP registers, extended version.
3935 *
3936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3937 * @param pFpuCtx The FPU context.
3938 * @param uFpuOpcode The FPU opcode value (see IEMCPU::uFpuOpcode).
3939 */
3940DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorkerEx(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint16_t uFpuOpcode) RT_NOEXCEPT
3941{
3942 Assert(uFpuOpcode != UINT16_MAX);
3943 pFpuCtx->FOP = uFpuOpcode;
3944 /** @todo x87.CS and FPUIP need to be kept separately. */
3945 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
3946 {
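 /* In real and V86 mode x87.CS is zeroed and FPUIP holds a linear-style address:
    the CS selector shifted left by four OR'ed with EIP. */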
3947 /** @todo Testcase: we are making assumptions here about how FPUIP and FPUDP are
3948 * handled in real mode, based on the fnsave and fnstenv images. */
3949 pFpuCtx->CS = 0;
3950 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
3951 }
3952 else if (!IEM_IS_LONG_MODE(pVCpu))
3953 {
3954 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
3955 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
3956 }
3957 else
3958 *(uint64_t *)&pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
3959}
3960
3961
3962/**
3963 * Marks the specified stack register as free (for FFREE).
3964 *
3965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3966 * @param iStReg The register to free.
3967 */
3968DECLINLINE(void) iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
3969{
3970 Assert(iStReg < 8);
3971 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3972 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
3973 pFpuCtx->FTW &= ~RT_BIT(iReg);
3974}
3975
3976
3977/**
3978 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
3979 *
3980 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3981 */
3982DECLINLINE(void) iemFpuStackIncTop(PVMCPUCC pVCpu) RT_NOEXCEPT
3983{
3984 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3985 uint16_t uFsw = pFpuCtx->FSW;
3986 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
3987 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
3988 uFsw &= ~X86_FSW_TOP_MASK;
3989 uFsw |= uTop;
3990 pFpuCtx->FSW = uFsw;
3991}
3992
3993
3994/**
3995 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
3996 *
3997 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3998 */
3999DECLINLINE(void) iemFpuStackDecTop(PVMCPUCC pVCpu) RT_NOEXCEPT
4000{
4001 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4002 uint16_t uFsw = pFpuCtx->FSW;
4003 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
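 /* Adding 7 to the 3-bit TOP field is the same as subtracting 1 modulo 8. */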
4004 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4005 uFsw &= ~X86_FSW_TOP_MASK;
4006 uFsw |= uTop;
4007 pFpuCtx->FSW = uFsw;
4008}
4009
4010
4011
4012
4013DECLINLINE(int) iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4014{
4015 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4016 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4017 if (pFpuCtx->FTW & RT_BIT(iReg))
4018 return VINF_SUCCESS;
4019 return VERR_NOT_FOUND;
4020}
4021
4022
4023DECLINLINE(int) iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef) RT_NOEXCEPT
4024{
4025 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4026 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4027 if (pFpuCtx->FTW & RT_BIT(iReg))
4028 {
4029 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
4030 return VINF_SUCCESS;
4031 }
4032 return VERR_NOT_FOUND;
4033}
4034
4035
4036DECLINLINE(int) iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
4037 uint8_t iStReg1, PCRTFLOAT80U *ppRef1) RT_NOEXCEPT
4038{
4039 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4040 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
4041 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4042 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4043 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4044 {
4045 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
4046 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
4047 return VINF_SUCCESS;
4048 }
4049 return VERR_NOT_FOUND;
4050}
4051
4052
4053DECLINLINE(int) iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1) RT_NOEXCEPT
4054{
4055 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4056 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
4057 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4058 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4059 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4060 {
4061 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
4062 return VINF_SUCCESS;
4063 }
4064 return VERR_NOT_FOUND;
4065}
4066
4067
4068/**
4069 * Rotates the stack registers when setting new TOS.
4070 *
4071 * @param pFpuCtx The FPU context.
4072 * @param iNewTop New TOS value.
4073 * @remarks We only do this to speed up fxsave/fxrstor which
4074 * arrange the FP registers in stack order.
4075 * MUST be done before writing the new TOS (FSW).
4076 */
4077DECLINLINE(void) iemFpuRotateStackSetTop(PX86FXSTATE pFpuCtx, uint16_t iNewTop) RT_NOEXCEPT
4078{
4079 uint16_t iOldTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
4080 RTFLOAT80U ar80Temp[8];
4081
4082 if (iOldTop == iNewTop)
4083 return;
4084
4085 /* Unscrew the stack and get it into 'native' order. */
4086 ar80Temp[0] = pFpuCtx->aRegs[(8 - iOldTop + 0) & X86_FSW_TOP_SMASK].r80;
4087 ar80Temp[1] = pFpuCtx->aRegs[(8 - iOldTop + 1) & X86_FSW_TOP_SMASK].r80;
4088 ar80Temp[2] = pFpuCtx->aRegs[(8 - iOldTop + 2) & X86_FSW_TOP_SMASK].r80;
4089 ar80Temp[3] = pFpuCtx->aRegs[(8 - iOldTop + 3) & X86_FSW_TOP_SMASK].r80;
4090 ar80Temp[4] = pFpuCtx->aRegs[(8 - iOldTop + 4) & X86_FSW_TOP_SMASK].r80;
4091 ar80Temp[5] = pFpuCtx->aRegs[(8 - iOldTop + 5) & X86_FSW_TOP_SMASK].r80;
4092 ar80Temp[6] = pFpuCtx->aRegs[(8 - iOldTop + 6) & X86_FSW_TOP_SMASK].r80;
4093 ar80Temp[7] = pFpuCtx->aRegs[(8 - iOldTop + 7) & X86_FSW_TOP_SMASK].r80;
4094
4095 /* Now rotate the stack to the new position. */
4096 pFpuCtx->aRegs[0].r80 = ar80Temp[(iNewTop + 0) & X86_FSW_TOP_SMASK];
4097 pFpuCtx->aRegs[1].r80 = ar80Temp[(iNewTop + 1) & X86_FSW_TOP_SMASK];
4098 pFpuCtx->aRegs[2].r80 = ar80Temp[(iNewTop + 2) & X86_FSW_TOP_SMASK];
4099 pFpuCtx->aRegs[3].r80 = ar80Temp[(iNewTop + 3) & X86_FSW_TOP_SMASK];
4100 pFpuCtx->aRegs[4].r80 = ar80Temp[(iNewTop + 4) & X86_FSW_TOP_SMASK];
4101 pFpuCtx->aRegs[5].r80 = ar80Temp[(iNewTop + 5) & X86_FSW_TOP_SMASK];
4102 pFpuCtx->aRegs[6].r80 = ar80Temp[(iNewTop + 6) & X86_FSW_TOP_SMASK];
4103 pFpuCtx->aRegs[7].r80 = ar80Temp[(iNewTop + 7) & X86_FSW_TOP_SMASK];
4104}
4105
4106
4107/**
4108 * Updates the FPU exception status after FCW is changed.
4109 *
4110 * @param pFpuCtx The FPU context.
4111 */
4112DECLINLINE(void) iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4113{
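 /* Set the exception summary (ES) and busy (B) flags when any exception pending in
    FSW is unmasked in FCW, otherwise clear them. */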
4114 uint16_t u16Fsw = pFpuCtx->FSW;
4115 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
4116 u16Fsw |= X86_FSW_ES | X86_FSW_B;
4117 else
4118 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
4119 pFpuCtx->FSW = u16Fsw;
4120}
4121
4122
4123/**
4124 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
4125 *
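 * Each register gets a two-bit tag: 0 = valid, 1 = zero, 2 = special (NaN,
 * infinity, denormal, unnormal), 3 = empty.
 *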
4126 * @returns The full FTW.
4127 * @param pFpuCtx The FPU context.
4128 */
4129DECLINLINE(uint16_t) iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx) RT_NOEXCEPT
4130{
4131 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
4132 uint16_t u16Ftw = 0;
4133 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
4134 for (unsigned iSt = 0; iSt < 8; iSt++)
4135 {
4136 unsigned const iReg = (iSt + iTop) & 7;
4137 if (!(u8Ftw & RT_BIT(iReg)))
4138 u16Ftw |= 3 << (iReg * 2); /* empty */
4139 else
4140 {
4141 uint16_t uTag;
4142 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
4143 if (pr80Reg->s.uExponent == 0x7fff)
4144 uTag = 2; /* Exponent is all 1's => Special. */
4145 else if (pr80Reg->s.uExponent == 0x0000)
4146 {
4147 if (pr80Reg->s.uMantissa == 0x0000)
4148 uTag = 1; /* All bits are zero => Zero. */
4149 else
4150 uTag = 2; /* Must be special. */
4151 }
4152 else if (pr80Reg->s.uMantissa & RT_BIT_64(63)) /* The J bit. */
4153 uTag = 0; /* Valid. */
4154 else
4155 uTag = 2; /* Must be special. */
4156
4157 u16Ftw |= uTag << (iReg * 2);
4158 }
4159 }
4160
4161 return u16Ftw;
4162}
4163
4164
4165/**
4166 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
4167 *
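 * The compressed form has one bit per register, set whenever the two-bit tag is
 * anything other than 3 (empty). For example, a full FTW of 0x3fff (only
 * register 7 in use) compresses to 0x80.
 *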
4168 * @returns The compressed FTW.
4169 * @param u16FullFtw The full FTW to convert.
4170 */
4171DECLINLINE(uint16_t) iemFpuCompressFtw(uint16_t u16FullFtw) RT_NOEXCEPT
4172{
4173 uint8_t u8Ftw = 0;
4174 for (unsigned i = 0; i < 8; i++)
4175 {
4176 if ((u16FullFtw & 3) != 3 /*empty*/)
4177 u8Ftw |= RT_BIT(i);
4178 u16FullFtw >>= 2;
4179 }
4180
4181 return u8Ftw;
4182}
4183
4184/** @} */
4185
4186
4187/** @name Memory access.
4188 *
4189 * @{
4190 */
4191
4192
4193/**
4194 * Checks whether alignment checks are enabled or not.
4195 *
4196 * @returns true if enabled, false if not.
4197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4198 */
4199DECLINLINE(bool) iemMemAreAlignmentChecksEnabled(PVMCPUCC pVCpu) RT_NOEXCEPT
4200{
4201#if 0
4202 AssertCompile(X86_CR0_AM == X86_EFL_AC);
4203 return IEM_GET_CPL(pVCpu) == 3
4204 && (((uint32_t)pVCpu->cpum.GstCtx.cr0 & pVCpu->cpum.GstCtx.eflags.u) & X86_CR0_AM);
4205#else
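 /* IEM_F_X86_AC is maintained as part of fExec and mirrors the disabled check above:
    CPL 3 with both CR0.AM and EFLAGS.AC set. */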
4206 return RT_BOOL(pVCpu->iem.s.fExec & IEM_F_X86_AC);
4207#endif
4208}
4209
4210/**
4211 * Checks if the given segment can be written to, raising the appropriate
4212 * exception if not.
4213 *
4214 * @returns VBox strict status code.
4215 *
4216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4217 * @param pHid Pointer to the hidden register.
4218 * @param iSegReg The register number.
4219 * @param pu64BaseAddr Where to return the base address to use for the
4220 * segment. (In 64-bit code it may differ from the
4221 * base in the hidden segment.)
4222 */
4223DECLINLINE(VBOXSTRICTRC) iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
4224 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
4225{
4226 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
4227
4228 if (IEM_IS_64BIT_CODE(pVCpu))
4229 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
4230 else
4231 {
4232 if (!pHid->Attr.n.u1Present)
4233 {
4234 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
4235 AssertRelease(uSel == 0);
4236 LogEx(LOG_GROUP_IEM,("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
4237 return iemRaiseGeneralProtectionFault0(pVCpu);
4238 }
4239
4240 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
4241 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4242 && !IEM_IS_64BIT_CODE(pVCpu) )
4243 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
4244 *pu64BaseAddr = pHid->u64Base;
4245 }
4246 return VINF_SUCCESS;
4247}
4248
4249
4250/**
4251 * Checks if the given segment can be read from, raising the appropriate
4252 * exception if not.
4253 *
4254 * @returns VBox strict status code.
4255 *
4256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4257 * @param pHid Pointer to the hidden register.
4258 * @param iSegReg The register number.
4259 * @param pu64BaseAddr Where to return the base address to use for the
4260 * segment. (In 64-bit code it may differ from the
4261 * base in the hidden segment.)
4262 */
4263DECLINLINE(VBOXSTRICTRC) iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
4264 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
4265{
4266 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
4267
4268 if (IEM_IS_64BIT_CODE(pVCpu))
4269 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
4270 else
4271 {
4272 if (!pHid->Attr.n.u1Present)
4273 {
4274 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
4275 AssertRelease(uSel == 0);
4276 LogEx(LOG_GROUP_IEM,("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
4277 return iemRaiseGeneralProtectionFault0(pVCpu);
4278 }
4279
4280 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4281 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
4282 *pu64BaseAddr = pHid->u64Base;
4283 }
4284 return VINF_SUCCESS;
4285}
4286
4287
4288/**
4289 * Maps a physical page.
4290 *
4291 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
4292 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4293 * @param GCPhysMem The physical address.
4294 * @param fAccess The intended access.
4295 * @param ppvMem Where to return the mapping address.
4296 * @param pLock The PGM lock.
4297 */
4298DECLINLINE(int) iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
4299 void **ppvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
4300{
4301#ifdef IEM_LOG_MEMORY_WRITES
4302 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4303 return VERR_PGM_PHYS_TLB_CATCH_ALL;
4304#endif
4305
4306 /** @todo This API may require some improvement later. A private deal with PGM
4307 * regarding locking and unlocking needs to be struck. A couple of TLBs
4308 * living in PGM, but with publicly accessible inlined access methods
4309 * could perhaps be an even better solution. */
4310 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
4311 GCPhysMem,
4312 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
4313 RT_BOOL(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS),
4314 ppvMem,
4315 pLock);
4316 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
4317 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
4318
4319 return rc;
4320}
4321
4322
4323/**
4324 * Unmaps a page previously mapped by iemMemPageMap.
4325 *
4326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4327 * @param GCPhysMem The physical address.
4328 * @param fAccess The intended access.
4329 * @param pvMem What iemMemPageMap returned.
4330 * @param pLock The PGM lock.
4331 */
4332DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
4333 const void *pvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
4334{
4335 NOREF(pVCpu);
4336 NOREF(GCPhysMem);
4337 NOREF(fAccess);
4338 NOREF(pvMem);
4339 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
4340}
4341
4342#ifdef IEM_WITH_SETJMP
4343
4344/** @todo slim this down */
4345DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg,
4346 size_t cbMem, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
4347{
4348 Assert(cbMem >= 1);
4349 Assert(iSegReg < X86_SREG_COUNT);
4350
4351 /*
4352 * 64-bit mode is simpler.
4353 */
4354 if (IEM_IS_64BIT_CODE(pVCpu))
4355 {
4356 if (iSegReg >= X86_SREG_FS && iSegReg != UINT8_MAX)
4357 {
4358 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
4359 PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
4360 GCPtrMem += pSel->u64Base;
4361 }
4362
4363 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
4364 return GCPtrMem;
4365 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
4366 }
4367 /*
4368 * 16-bit and 32-bit segmentation.
4369 */
4370 else if (iSegReg != UINT8_MAX)
4371 {
4372 /** @todo Does this apply to segments with 4G-1 limit? */
4373 uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
4374 if (RT_LIKELY(GCPtrLast32 >= (uint32_t)GCPtrMem))
4375 {
4376 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
4377 PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
4378 switch (pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
4379 | X86_SEL_TYPE_READ | X86_SEL_TYPE_WRITE /* same as read */
4380 | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_CONF /* same as down */
4381 | X86_SEL_TYPE_CODE))
4382 {
4383 case X86DESCATTR_P: /* readonly data, expand up */
4384 case X86DESCATTR_P | X86_SEL_TYPE_WRITE: /* writable data, expand up */
4385 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ: /* code, read-only */
4386 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_CONF: /* conforming code, read-only */
4387 /* expand up */
4388 if (RT_LIKELY(GCPtrLast32 <= pSel->u32Limit))
4389 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
4390 Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x vs %#x\n",
4391 (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit));
4392 break;
4393
4394 case X86DESCATTR_P | X86_SEL_TYPE_DOWN: /* readonly data, expand down */
4395 case X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_WRITE: /* writable data, expand down */
4396 /* expand down */
4397 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
4398 && ( pSel->Attr.n.u1DefBig
4399 || GCPtrLast32 <= UINT32_C(0xffff)) ))
4400 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
4401 Log10(("iemMemApplySegmentToReadJmp: expand down out of bounds %#x..%#x vs %#x..%#x\n",
4402 (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit, pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT16_MAX));
4403 break;
4404
4405 default:
4406 Log10(("iemMemApplySegmentToReadJmp: bad selector %#x\n", pSel->Attr.u));
4407 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
4408 break;
4409 }
4410 }
4411 Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x\n",(uint32_t)GCPtrMem, GCPtrLast32));
4412 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
4413 }
4414 /*
4415 * 32-bit flat address.
4416 */
4417 else
4418 return GCPtrMem;
4419}
4420
4421
4422/** @todo slim this down */
4423DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem,
4424 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
4425{
4426 Assert(cbMem >= 1);
4427 Assert(iSegReg < X86_SREG_COUNT);
4428
4429 /*
4430 * 64-bit mode is simpler.
4431 */
4432 if (IEM_IS_64BIT_CODE(pVCpu))
4433 {
4434 if (iSegReg >= X86_SREG_FS)
4435 {
4436 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
4437 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
4438 GCPtrMem += pSel->u64Base;
4439 }
4440
4441 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
4442 return GCPtrMem;
4443 }
4444 /*
4445 * 16-bit and 32-bit segmentation.
4446 */
4447 else
4448 {
4449 Assert(GCPtrMem <= UINT32_MAX);
4450 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
4451 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
4452 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
4453 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
4454 if ( fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE) /* data, expand up */
4455 /** @todo explore exactly how the CS stuff works in real mode. See also
4456 * http://www.rcollins.org/Productivity/DescriptorCache.html and
4457 * http://www.rcollins.org/ddj/Aug98/Aug98.html for some insight. */
4458 || (iSegReg == X86_SREG_CS && IEM_IS_REAL_OR_V86_MODE(pVCpu)) ) /* Ignored for CS. */ /** @todo testcase! */
4459 {
4460 /* expand up */
4461 uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
4462 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
4463 && GCPtrLast32 >= (uint32_t)GCPtrMem))
4464 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
4465 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
4466 }
4467 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
4468 {
4469 /* expand down - the upper boundary is defined by the B bit, not G. */
4470 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
4471 if (RT_LIKELY( (uint32_t)GCPtrMem >= pSel->u32Limit
4472 && (pSel->Attr.n.u1DefBig || GCPtrLast32 <= UINT32_C(0xffff))
4473 && GCPtrLast32 >= (uint32_t)GCPtrMem))
4474 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
4475 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
4476 }
4477 else
4478 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
4479 }
4480 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
4481}
4482
4483#endif /* IEM_WITH_SETJMP */
4484
4485/**
4486 * Fakes a long mode stack selector for SS = 0.
4487 *
4488 * @param pDescSs Where to return the fake stack descriptor.
4489 * @param uDpl The DPL we want.
4490 */
4491DECLINLINE(void) iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl) RT_NOEXCEPT
4492{
4493 pDescSs->Long.au64[0] = 0;
4494 pDescSs->Long.au64[1] = 0;
4495 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4496 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
4497 pDescSs->Long.Gen.u2Dpl = uDpl;
4498 pDescSs->Long.Gen.u1Present = 1;
4499 pDescSs->Long.Gen.u1Long = 1;
4500}
4501
4502
4503/*
4504 * Unmap helpers.
4505 */
4506
4507#ifdef IEM_WITH_SETJMP
4508
4509DECL_INLINE_THROW(void) iemMemCommitAndUnmapRwJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
4510{
4511# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
4512 if (RT_LIKELY(bMapInfo == 0))
4513 return;
4514# endif
4515 iemMemCommitAndUnmapRwSafeJmp(pVCpu, bMapInfo);
4516}
4517
4518
4519DECL_INLINE_THROW(void) iemMemCommitAndUnmapAtJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
4520{
4521# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
4522 if (RT_LIKELY(bMapInfo == 0))
4523 return;
4524# endif
4525 iemMemCommitAndUnmapAtSafeJmp(pVCpu, bMapInfo);
4526}
4527
4528
4529DECL_INLINE_THROW(void) iemMemCommitAndUnmapWoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
4530{
4531# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
4532 if (RT_LIKELY(bMapInfo == 0))
4533 return;
4534# endif
4535 iemMemCommitAndUnmapWoSafeJmp(pVCpu, bMapInfo);
4536}
4537
4538
4539DECL_INLINE_THROW(void) iemMemCommitAndUnmapRoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
4540{
4541# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
4542 if (RT_LIKELY(bMapInfo == 0))
4543 return;
4544# endif
4545 iemMemCommitAndUnmapRoSafeJmp(pVCpu, bMapInfo);
4546}
4547
4548DECLINLINE(void) iemMemRollbackAndUnmapWo(PVMCPUCC pVCpu, uint8_t bMapInfo) RT_NOEXCEPT
4549{
4550# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
4551 if (RT_LIKELY(bMapInfo == 0))
4552 return;
4553# endif
4554 iemMemRollbackAndUnmapWoSafe(pVCpu, bMapInfo);
4555}
4556
4557#endif /* IEM_WITH_SETJMP */
4558
4559
4560/*
4561 * Instantiate R/W inline templates.
4562 */
4563
4564/** @def TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK
4565 * Used to check if an unaligned access is within the page and won't
4566 * trigger an \#AC.
4567 *
4568 * This can also be used to deal with misaligned accesses on platforms that are
4569 * sensitive to such, if desired.
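 *
 * For example, with 4 KiB guest pages a 4-byte access at page offset 0xffc is
 * still within the page, while an access at offset 0xffd would cross into the
 * next page.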
4570 */
4571#if 1
4572# define TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(a_pVCpu, a_GCPtrEff, a_TmplMemType) \
4573 ( ((a_GCPtrEff) & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(a_TmplMemType) \
4574 && !((a_pVCpu)->iem.s.fExec & IEM_F_X86_AC) )
4575#else
4576# define TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(a_pVCpu, a_GCPtrEff, a_TmplMemType) 0
4577#endif
4578
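/*
 * Note: Each inclusion of IEMAllMemRWTmplInline.cpp.h below is parameterized by the
 *       TMPL_MEM_* defines and instantiates the inlined memory access workers for
 *       that type; TMPL_MEM_FN_SUFF gives the suffix used in the generated function
 *       names and TMPL_MEM_WITH_STACK additionally enables the stack push/pop
 *       variants.
 */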
4579#define TMPL_MEM_WITH_ATOMIC_MAPPING
4580
4581#define TMPL_MEM_TYPE uint8_t
4582#define TMPL_MEM_TYPE_ALIGN 0
4583#define TMPL_MEM_TYPE_SIZE 1
4584#define TMPL_MEM_FN_SUFF U8
4585#define TMPL_MEM_FMT_TYPE "%#04x"
4586#define TMPL_MEM_FMT_DESC "byte"
4587#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4588
4589#define TMPL_MEM_WITH_STACK
4590
4591#define TMPL_MEM_TYPE uint16_t
4592#define TMPL_MEM_TYPE_ALIGN 1
4593#define TMPL_MEM_TYPE_SIZE 2
4594#define TMPL_MEM_FN_SUFF U16
4595#define TMPL_MEM_FMT_TYPE "%#06x"
4596#define TMPL_MEM_FMT_DESC "word"
4597#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4598
4599#define TMPL_WITH_PUSH_SREG
4600#define TMPL_MEM_TYPE uint32_t
4601#define TMPL_MEM_TYPE_ALIGN 3
4602#define TMPL_MEM_TYPE_SIZE 4
4603#define TMPL_MEM_FN_SUFF U32
4604#define TMPL_MEM_FMT_TYPE "%#010x"
4605#define TMPL_MEM_FMT_DESC "dword"
4606#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4607#undef TMPL_WITH_PUSH_SREG
4608
4609#define TMPL_MEM_TYPE uint64_t
4610#define TMPL_MEM_TYPE_ALIGN 7
4611#define TMPL_MEM_TYPE_SIZE 8
4612#define TMPL_MEM_FN_SUFF U64
4613#define TMPL_MEM_FMT_TYPE "%#018RX64"
4614#define TMPL_MEM_FMT_DESC "qword"
4615#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4616
4617#undef TMPL_MEM_WITH_STACK
4618#undef TMPL_MEM_WITH_ATOMIC_MAPPING
4619
4620#define TMPL_MEM_NO_STORE
4621#define TMPL_MEM_NO_MAPPING
4622#define TMPL_MEM_TYPE uint64_t
4623#define TMPL_MEM_TYPE_ALIGN 15
4624#define TMPL_MEM_TYPE_SIZE 8
4625#define TMPL_MEM_FN_SUFF U64AlignedU128
4626#define TMPL_MEM_FMT_TYPE "%#018RX64"
4627#define TMPL_MEM_FMT_DESC "qword"
4628#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4629
4630#undef TMPL_MEM_NO_STORE
4631#undef TMPL_MEM_NO_MAPPING
4632
4633#define TMPL_MEM_TYPE RTFLOAT80U
4634#define TMPL_MEM_TYPE_ALIGN 7
4635#define TMPL_MEM_TYPE_SIZE 10
4636#define TMPL_MEM_FN_SUFF R80
4637#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
4638#define TMPL_MEM_FMT_DESC "tword"
4639#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4640
4641#define TMPL_MEM_TYPE RTPBCD80U
4642#define TMPL_MEM_TYPE_ALIGN 7 /** @todo RTPBCD80U alignment testcase */
4643#define TMPL_MEM_TYPE_SIZE 10
4644#define TMPL_MEM_FN_SUFF D80
4645#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
4646#define TMPL_MEM_FMT_DESC "tword"
4647#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4648
4649#define TMPL_MEM_WITH_ATOMIC_MAPPING
4650#define TMPL_MEM_TYPE RTUINT128U
4651#define TMPL_MEM_TYPE_ALIGN 15
4652#define TMPL_MEM_TYPE_SIZE 16
4653#define TMPL_MEM_FN_SUFF U128
4654#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
4655#define TMPL_MEM_FMT_DESC "dqword"
4656#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4657#undef TMPL_MEM_WITH_ATOMIC_MAPPING
4658
4659#define TMPL_MEM_NO_MAPPING
4660#define TMPL_MEM_TYPE RTUINT128U
4661#define TMPL_MEM_TYPE_ALIGN 0
4662#define TMPL_MEM_TYPE_SIZE 16
4663#define TMPL_MEM_FN_SUFF U128NoAc
4664#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
4665#define TMPL_MEM_FMT_DESC "dqword"
4666#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4667#undef TMPL_MEM_NO_MAPPING
4668
4669
4670/* Every template for which unaligned accesses within a page are not okay goes below. */
4671#undef TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK
4672#define TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(a_pVCpu, a_GCPtrEff, a_TmplMemType) 0
4673
4674#define TMPL_MEM_NO_MAPPING
4675#define TMPL_MEM_TYPE RTUINT128U
4676#define TMPL_MEM_TYPE_ALIGN 15
4677#define TMPL_MEM_TYPE_SIZE 16
4678#define TMPL_MEM_FN_SUFF U128AlignedSse
4679#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
4680#define TMPL_MEM_FMT_DESC "dqword"
4681#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4682#undef TMPL_MEM_NO_MAPPING
4683
4684#define TMPL_MEM_NO_MAPPING
4685#define TMPL_MEM_TYPE RTUINT256U
4686#define TMPL_MEM_TYPE_ALIGN 0
4687#define TMPL_MEM_TYPE_SIZE 32
4688#define TMPL_MEM_FN_SUFF U256NoAc
4689#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
4690#define TMPL_MEM_FMT_DESC "qqword"
4691#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4692#undef TMPL_MEM_NO_MAPPING
4693
4694#define TMPL_MEM_NO_MAPPING
4695#define TMPL_MEM_TYPE RTUINT256U
4696#define TMPL_MEM_TYPE_ALIGN 31
4697#define TMPL_MEM_TYPE_SIZE 32
4698#define TMPL_MEM_FN_SUFF U256AlignedAvx
4699#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
4700#define TMPL_MEM_FMT_DESC "qqword"
4701#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4702#undef TMPL_MEM_NO_MAPPING
4703
4704#undef TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK
4705
4706/** @} */
4707
4708
4709#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4710
4711/**
4712 * Gets CR0 fixed-0 bits in VMX operation.
4713 *
4714 * We do this rather than fetching what we report to the guest (in
4715 * IA32_VMX_CR0_FIXED0 MSR) because real hardware (and so do we) reports the same
4716 * values regardless of whether the unrestricted-guest feature is available on the CPU.
4717 *
4718 * @returns CR0 fixed-0 bits.
4719 * @param pVCpu The cross context virtual CPU structure.
4720 * @param fVmxNonRootMode Whether the CR0 fixed-0 bits for VMX non-root mode
4721 * must be returned. When @c false, the CR0 fixed-0
4722 * bits for VMX root mode are returned.
4723 *
4724 */
4725DECLINLINE(uint64_t) iemVmxGetCr0Fixed0(PCVMCPUCC pVCpu, bool fVmxNonRootMode) RT_NOEXCEPT
4726{
4727 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
4728
4729 PCVMXMSRS pMsrs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs;
4730 if ( fVmxNonRootMode
4731 && (pMsrs->ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST))
4732 return VMX_V_CR0_FIXED0_UX;
4733 return VMX_V_CR0_FIXED0;
4734}
4735
4736
4737# ifdef XAPIC_OFF_END /* Requires VBox/apic.h to be included before IEMInline.h. */
4738/**
4739 * Sets virtual-APIC write emulation as pending.
4740 *
4741 * @param pVCpu The cross context virtual CPU structure.
4742 * @param offApic The offset in the virtual-APIC page that was written.
4743 */
4744DECLINLINE(void) iemVmxVirtApicSetPendingWrite(PVMCPUCC pVCpu, uint16_t offApic) RT_NOEXCEPT
4745{
4746 Assert(offApic < XAPIC_OFF_END + 4);
4747
4748 /*
4749 * Record the currently updated APIC offset, as we need this later for figuring
4750 * out whether to perform TPR, EOI or self-IPI virtualization, as well
4751 * as for supplying the exit qualification when causing an APIC-write VM-exit.
4752 */
4753 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offApic;
4754
4755 /*
4756 * Flag that we need to perform virtual-APIC write emulation (TPR/PPR/EOI/Self-IPI
4757 * virtualization or APIC-write emulation).
4758 */
4759 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4760 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
4761}
4762# endif /* XAPIC_OFF_END */
4763
4764#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
4765
4766#endif /* !VMM_INCLUDED_SRC_include_IEMInline_h */