VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/target-x86/IEMInline-x86.h

Last change on this file was 108409, checked in by vboxsync on 2025-02-27: VMM/IEM: Made IEMAll.cpp build targeting arm. jiraref:VBP-1531

/* $Id: IEMInline-x86.h 108409 2025-02-27 10:35:39Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - Inlined Functions, x86 target.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#ifndef VMM_INCLUDED_SRC_VMMAll_target_x86_IEMInline_x86_h
#define VMM_INCLUDED_SRC_VMMAll_target_x86_IEMInline_x86_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <VBox/err.h>


/**
 * Calculates the IEM_F_X86_AC flags.
 *
 * @returns IEM_F_X86_AC or zero
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 */
DECL_FORCE_INLINE(uint32_t) iemCalcExecAcFlag(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));

    if (   !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
        || (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_AM | X86_CR0_PE)) != (X86_CR0_AM | X86_CR0_PE)
        || (   !pVCpu->cpum.GstCtx.eflags.Bits.u1VM
            && pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl != 3))
        return 0;
    return IEM_F_X86_AC;
}

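/*
 * Illustrative note: the predicate above reduces to the architectural
 * condition for alignment checking, i.e. EFLAGS.AC, CR0.AM and CR0.PE must
 * all be set and the CPU must be in V86 mode or running at CPL 3.  A
 * hypothetical caller refreshing just this bit could look like:
 *
 * @code
 *     pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_X86_AC)
 *                        | iemCalcExecAcFlag(pVCpu);
 * @endcode
 */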

/**
 * Calculates the IEM_F_MODE_X86_32BIT_FLAT flag.
 *
 * Checks if ES, CS, SS and DS are all wide open flat 32-bit segments.  This
 * will reject expand down data segments and conforming code segments.
 *
 * ASSUMES that the CPU is in 32-bit mode.
 *
 * @note    Will return zero if any of the segment register state is marked
 *          external, this must be factored into assertions checking fExec
 *          consistency.
 *
 * @returns IEM_F_MODE_X86_32BIT_FLAT or zero.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @sa      iemCalc32BitFlatIndicatorEsDs
 */
DECL_FORCE_INLINE(uint32_t) iemCalc32BitFlatIndicator(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    AssertCompile(X86_SEL_TYPE_DOWN == X86_SEL_TYPE_CONF);
    return (  (   pVCpu->cpum.GstCtx.es.Attr.u
                | pVCpu->cpum.GstCtx.cs.Attr.u
                | pVCpu->cpum.GstCtx.ss.Attr.u
                | pVCpu->cpum.GstCtx.ds.Attr.u)
            & (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86DESCATTR_UNUSABLE))
        ==    (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P)
        && (  (pVCpu->cpum.GstCtx.es.u32Limit + 1)
            | (pVCpu->cpum.GstCtx.cs.u32Limit + 1)
            | (pVCpu->cpum.GstCtx.ss.u32Limit + 1)
            | (pVCpu->cpum.GstCtx.ds.u32Limit + 1))
        == 0
        && (  pVCpu->cpum.GstCtx.es.u64Base
            | pVCpu->cpum.GstCtx.cs.u64Base
            | pVCpu->cpum.GstCtx.ss.u64Base
            | pVCpu->cpum.GstCtx.ds.u64Base)
        == 0
        && !(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS))
        ? IEM_F_MODE_X86_32BIT_FLAT : 0;
}

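/*
 * Sketch of the conditions checked above (for illustration only).  A segment
 * counts as wide-open flat when, for each of ES, CS, SS and DS:
 *
 * @code
 *     Attr  : ACCESSED, G, D and P set; DOWN/CONF and UNUSABLE clear
 *     Limit : 0xffffffff  (so u32Limit + 1 wraps to zero)
 *     Base  : 0
 * @endcode
 *
 * The limit test exploits 32-bit wrap-around: only 0xffffffff satisfies
 * (u32Limit + 1) == 0, and OR-ing the four sums lets a single comparison
 * cover all four registers.
 */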

/**
 * Calculates the IEM_F_MODE_X86_32BIT_FLAT flag, ASSUMING the CS and SS are
 * flat already.
 *
 * This is used by sysenter.
 *
 * @note    Will return zero if any of the segment register state is marked
 *          external, this must be factored into assertions checking fExec
 *          consistency.
 *
 * @returns IEM_F_MODE_X86_32BIT_FLAT or zero.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @sa      iemCalc32BitFlatIndicator
 */
DECL_FORCE_INLINE(uint32_t) iemCalc32BitFlatIndicatorEsDs(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    AssertCompile(X86_SEL_TYPE_DOWN == X86_SEL_TYPE_CONF);
    return (  (   pVCpu->cpum.GstCtx.es.Attr.u
                | pVCpu->cpum.GstCtx.ds.Attr.u)
            & (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86DESCATTR_UNUSABLE))
        ==    (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P)
        && (  (pVCpu->cpum.GstCtx.es.u32Limit + 1)
            | (pVCpu->cpum.GstCtx.ds.u32Limit + 1))
        == 0
        && (  pVCpu->cpum.GstCtx.es.u64Base
            | pVCpu->cpum.GstCtx.ds.u64Base)
        == 0
        && !(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS))
        ? IEM_F_MODE_X86_32BIT_FLAT : 0;
}


/**
 * Calculates the IEM_F_MODE_XXX, CPL and AC flags.
 *
 * @returns IEM_F_MODE_XXX, IEM_F_X86_CPL_MASK and IEM_F_X86_AC.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 */
DECL_FORCE_INLINE(uint32_t) iemCalcExecModeAndCplFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    /*
     * We duplicate code from CPUMGetGuestCPL and CPUMIsGuestIn64BitCodeEx
     * here to try to get this done as efficiently as possible.
     */
    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);

    if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
    {
        if (!pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
        {
            Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
            uint32_t fExec = ((uint32_t)pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl << IEM_F_X86_CPL_SHIFT);
            if (   !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
                || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
                || fExec != (3U << IEM_F_X86_CPL_SHIFT))
            { /* likely */ }
            else
                fExec |= IEM_F_X86_AC;

            if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig)
            {
                Assert(!pVCpu->cpum.GstCtx.cs.Attr.n.u1Long || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA));
                fExec |= IEM_F_MODE_X86_32BIT_PROT | iemCalc32BitFlatIndicator(pVCpu);
            }
            else if (   pVCpu->cpum.GstCtx.cs.Attr.n.u1Long
                     && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA))
                fExec |= IEM_F_MODE_X86_64BIT;
            else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
                fExec |= IEM_F_MODE_X86_16BIT_PROT;
            else
                fExec |= IEM_F_MODE_X86_16BIT_PROT_PRE_386;
            return fExec;
        }
        if (   !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
            || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM))
            return IEM_F_MODE_X86_16BIT_PROT_V86 | (UINT32_C(3) << IEM_F_X86_CPL_SHIFT);
        return IEM_F_MODE_X86_16BIT_PROT_V86 | (UINT32_C(3) << IEM_F_X86_CPL_SHIFT) | IEM_F_X86_AC;
    }

    /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
    if (RT_LIKELY(!pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig))
    {
        if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
            return IEM_F_MODE_X86_16BIT;
        return IEM_F_MODE_X86_16BIT_PRE_386;
    }

    /* 32-bit unreal mode. */
    return IEM_F_MODE_X86_32BIT | iemCalc32BitFlatIndicator(pVCpu);
}

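/*
 * Illustration (inferred from the flag names, not from the original source):
 * the value returned above is one mode constant, the CPL placed in
 * IEM_F_X86_CPL_MASK, and optionally IEM_F_X86_AC, e.g. for flat 64-bit
 * ring-0 code:
 *
 * @code
 *     fExec == IEM_F_MODE_X86_64BIT | (0 << IEM_F_X86_CPL_SHIFT)
 * @endcode
 */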

/**
 * Calculates the AMD-V and VT-x related context flags.
 *
 * @returns 0 or a combination of IEM_F_X86_CTX_IN_GUEST, IEM_F_X86_CTX_SVM and
 *          IEM_F_X86_CTX_VMX.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 */
DECL_FORCE_INLINE(uint32_t) iemCalcExecHwVirtFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    /*
     * This duplicates code from CPUMIsGuestVmxEnabled, CPUMIsGuestSvmEnabled
     * and CPUMIsGuestInNestedHwvirtMode to some extent.
     */
    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);

    AssertCompile(X86_CR4_VMXE != MSR_K6_EFER_SVME);
    uint64_t const fTmp = (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VMXE)
                        | (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SVME);
    if (RT_LIKELY(!fTmp))
        return 0; /* likely */

    if (fTmp & X86_CR4_VMXE)
    {
        Assert(pVCpu->cpum.GstCtx.hwvirt.enmHwvirt == CPUMHWVIRT_VMX);
        if (pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode)
            return IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST;
        return IEM_F_X86_CTX_VMX;
    }

    Assert(pVCpu->cpum.GstCtx.hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
    if (pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN)
        return IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST;
    return IEM_F_X86_CTX_SVM;
}

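/*
 * Rough summary of the mapping implemented above (illustrative):
 *
 * @code
 *     CR4.VMXE set, in VMX non-root mode   -> IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST
 *     CR4.VMXE set, in VMX root mode       -> IEM_F_X86_CTX_VMX
 *     EFER.SVME set, VMRUN intercept set   -> IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST
 *     EFER.SVME set otherwise              -> IEM_F_X86_CTX_SVM
 *     neither enabled                      -> 0
 * @endcode
 */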
#ifdef VBOX_INCLUDED_vmm_dbgf_h /* VM::dbgf.ro.cEnabledHwBreakpoints is only accessible if VBox/vmm/dbgf.h is included. */

/**
 * Calculates IEM_F_PENDING_BRK_XXX (IEM_F_PENDING_BRK_MASK) flags.
 *
 * @returns IEM_F_PENDING_BRK_XXX or zero.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 */
DECL_FORCE_INLINE(uint32_t) iemCalcExecDbgFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);

    if (RT_LIKELY(   !(pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)
                  && pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledHwBreakpoints == 0))
        return 0;
    return iemCalcExecDbgFlagsSlow(pVCpu);
}


DECL_FORCE_INLINE(uint32_t) iemCalcExecFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    return iemCalcExecModeAndCplFlags(pVCpu)
         | iemCalcExecHwVirtFlags(pVCpu)
         /* SMM is not yet implemented */
         | iemCalcExecDbgFlags(pVCpu)
         ;
}

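/*
 * The three calculators above fill disjoint parts of IEMCPU::fExec, so a
 * plain OR composes them.  Sketch of a hypothetical full recalculation:
 *
 * @code
 *     pVCpu->iem.s.fExec = iemCalcExecFlags(pVCpu);
 * @endcode
 */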

/**
 * Re-calculates the MODE, CPL and AC parts of IEMCPU::fExec.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 */
DECL_FORCE_INLINE(void) iemRecalcExecModeAndCplAndAcFlags(PVMCPUCC pVCpu)
{
    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK | IEM_F_X86_AC))
                       | iemCalcExecModeAndCplFlags(pVCpu);
}


/**
 * Re-calculates the IEM_F_PENDING_BRK_MASK part of IEMCPU::fExec.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 */
DECL_FORCE_INLINE(void) iemRecalcExecDbgFlags(PVMCPUCC pVCpu)
{
    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_PENDING_BRK_MASK)
                       | iemCalcExecDbgFlags(pVCpu);
}

#endif /* VBOX_INCLUDED_vmm_dbgf_h */

/**
 * Macro used by the IEMExec* method to check the given instruction length.
 *
 * Will return on failure!
 *
 * @param   a_cbInstr   The given instruction length.
 * @param   a_cbMin     The minimum length.
 */
#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
    AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
                    ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)


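/*
 * The unsigned subtraction above is a single-compare range check for
 * a_cbMin <= a_cbInstr <= 15 (the maximum x86 instruction length): anything
 * below a_cbMin wraps around to a huge unsigned value.  Illustrative
 * walk-through for a_cbMin = 1:
 *
 * @code
 *     a_cbInstr = 0:   0u - 1u  = 0xffffffff  >  14  -> fail
 *     a_cbInstr = 1:   1u - 1u  = 0           <= 14  -> ok
 *     a_cbInstr = 15:  15u - 1u = 14          <= 14  -> ok
 *     a_cbInstr = 16:  16u - 1u = 15          >  14  -> fail
 * @endcode
 */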
/** @name   Misc Worker Functions.
 * @{
 */

/**
 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
 * not (kind of obsolete now).
 *
 * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
 */
#define IEMMISC_GET_EFL(a_pVCpu)            ( (a_pVCpu)->cpum.GstCtx.eflags.u )

/**
 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
 *
 * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
 * @param   a_fEfl  The new EFLAGS.
 */
#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl)    do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)


/**
 * Loads a NULL data selector into a selector register, both the hidden and
 * visible parts, in protected mode.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pSReg   Pointer to the segment register.
 * @param   uRpl    The RPL.
 */
DECLINLINE(void) iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl) RT_NOEXCEPT
{
    /** @todo Testcase: write a testcase checking what happens when loading a NULL
     *        data selector in protected mode. */
    pSReg->Sel      = uRpl;
    pSReg->ValidSel = uRpl;
    pSReg->fFlags   = CPUMSELREG_FLAGS_VALID;
    if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
    {
        /* VT-x (Intel 3960x) observed doing something like this. */
        pSReg->Attr.u   = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (IEM_GET_CPL(pVCpu) << X86DESCATTR_DPL_SHIFT);
        pSReg->u32Limit = UINT32_MAX;
        pSReg->u64Base  = 0;
    }
    else
    {
        pSReg->Attr.u   = X86DESCATTR_UNUSABLE;
        pSReg->u32Limit = 0;
        pSReg->u64Base  = 0;
    }
}

/** @} */


/** @name   Register Access.
 * @{
 */

/**
 * Gets a reference (pointer) to the specified hidden segment register.
 *
 * @returns Hidden register reference.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg The segment register.
 */
DECL_FORCE_INLINE(PCPUMSELREG) iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
{
    Assert(iSegReg < X86_SREG_COUNT);
    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
    PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];

    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
    return pSReg;
}


/**
 * Ensures that the given hidden segment register is up to date.
 *
 * @returns Hidden register reference.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pSReg   The segment register.
 */
DECL_FORCE_INLINE(PCPUMSELREG) iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg) RT_NOEXCEPT
{
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
    NOREF(pVCpu);
    return pSReg;
}


/**
 * Gets a reference (pointer) to the specified segment register (the selector
 * value).
 *
 * @returns Pointer to the selector variable.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg The segment register.
 */
DECL_FORCE_INLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
{
    Assert(iSegReg < X86_SREG_COUNT);
    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
    return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
}


/**
 * Fetches the selector value of a segment register.
 *
 * @returns The selector value.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg The segment register.
 */
DECL_FORCE_INLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
{
    Assert(iSegReg < X86_SREG_COUNT);
    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
    return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
}


/**
 * Fetches the base address value of a segment register.
 *
 * @returns The base address value.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg The segment register.
 */
DECL_FORCE_INLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
{
    Assert(iSegReg < X86_SREG_COUNT);
    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
    return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
}


/**
 * Gets a reference (pointer) to the specified general purpose register.
 *
 * @returns Register reference.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iReg    The general purpose register.
 */
DECL_FORCE_INLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
{
    Assert(iReg < 16);
    return &pVCpu->cpum.GstCtx.aGRegs[iReg];
}


#ifndef IEM_WITH_OPAQUE_DECODER_STATE
/**
 * Gets a reference (pointer) to the specified 8-bit general purpose register.
 *
 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
 *
 * @returns Register reference.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iReg    The register.
 */
DECL_FORCE_INLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
{
    if (iReg < 4 || (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REX | IEM_OP_PRF_VEX)))
    {
        Assert(iReg < 16);
        return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
    }
    /* high 8-bit register. */
    Assert(iReg < 8);
    return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
}
#endif


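/*
 * Why the (iReg & 3).bHi indexing works (illustration): without a REX/VEX
 * prefix, 8-bit encodings 4 thru 7 select the high byte of the first four
 * GPRs rather than SPL/BPL/SIL/DIL, i.e.:
 *
 * @code
 *     iReg 4 -> AH = aGRegs[0].bHi      iReg 6 -> DH = aGRegs[2].bHi
 *     iReg 5 -> CH = aGRegs[1].bHi      iReg 7 -> BH = aGRegs[3].bHi
 * @endcode
 */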
/**
 * Gets a reference (pointer) to the specified 8-bit general purpose register,
 * alternative version with extended (20) register index.
 *
 * @returns Register reference.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iRegEx  The register.  The first 16 are regular ones,
 *                  whereas 16 thru 19 map to AH, CH, DH and BH.
 */
DECL_FORCE_INLINE(uint8_t *) iemGRegRefU8Ex(PVMCPUCC pVCpu, uint8_t iRegEx) RT_NOEXCEPT
{
    /** @todo This could be done by double indexing on little endian hosts:
     *        return &pVCpu->cpum.GstCtx.aGRegs[iRegEx & 15].ab[iRegEx >> 4]; */
    if (iRegEx < 16)
        return &pVCpu->cpum.GstCtx.aGRegs[iRegEx].u8;

    /* high 8-bit register. */
    Assert(iRegEx < 20);
    return &pVCpu->cpum.GstCtx.aGRegs[iRegEx & 3].bHi;
}


/**
 * Gets a reference (pointer) to the specified 16-bit general purpose register.
 *
 * @returns Register reference.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iReg    The register.
 */
DECL_FORCE_INLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
{
    Assert(iReg < 16);
    return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
}


/**
 * Gets a reference (pointer) to the specified 32-bit general purpose register.
 *
 * @returns Register reference.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iReg    The register.
 */
DECL_FORCE_INLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
{
    Assert(iReg < 16);
    return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
}


/**
 * Gets a reference (pointer) to the specified signed 32-bit general purpose register.
 *
 * @returns Register reference.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iReg    The register.
 */
DECL_FORCE_INLINE(int32_t *) iemGRegRefI32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
{
    Assert(iReg < 16);
    return (int32_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
}


/**
 * Gets a reference (pointer) to the specified 64-bit general purpose register.
 *
 * @returns Register reference.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iReg    The register.
 */
DECL_FORCE_INLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
{
    Assert(iReg < 16);
    return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
}


/**
 * Gets a reference (pointer) to the specified signed 64-bit general purpose register.
 *
 * @returns Register reference.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iReg    The register.
 */
DECL_FORCE_INLINE(int64_t *) iemGRegRefI64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
{
    Assert(iReg < 16);
    return (int64_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
}


/**
 * Gets a reference (pointer) to the specified segment register's base address.
 *
 * @returns Segment register base address reference.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg The segment selector.
 */
DECL_FORCE_INLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
{
    Assert(iSegReg < X86_SREG_COUNT);
    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
    return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
}


#ifndef IEM_WITH_OPAQUE_DECODER_STATE
/**
 * Fetches the value of an 8-bit general purpose register.
 *
 * @returns The register value.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iReg    The register.
 */
DECL_FORCE_INLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
{
    return *iemGRegRefU8(pVCpu, iReg);
}
#endif


/**
 * Fetches the value of an 8-bit general purpose register, alternative version
 * with extended (20) register index.
 *
 * @returns The register value.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iRegEx  The register.  The first 16 are regular ones,
 *                  whereas 16 thru 19 map to AH, CH, DH and BH.
 */
DECL_FORCE_INLINE(uint8_t) iemGRegFetchU8Ex(PVMCPUCC pVCpu, uint8_t iRegEx) RT_NOEXCEPT
{
    return *iemGRegRefU8Ex(pVCpu, iRegEx);
}


/**
 * Fetches the value of a 16-bit general purpose register.
 *
 * @returns The register value.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iReg    The register.
 */
DECL_FORCE_INLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
{
    Assert(iReg < 16);
    return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
}


/**
 * Fetches the value of a 32-bit general purpose register.
 *
 * @returns The register value.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iReg    The register.
 */
DECL_FORCE_INLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
{
    Assert(iReg < 16);
    return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
}


/**
 * Fetches the value of a 64-bit general purpose register.
 *
 * @returns The register value.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iReg    The register.
 */
DECL_FORCE_INLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
{
    Assert(iReg < 16);
    return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
}


/**
 * Stores a 16-bit value to a general purpose register.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iReg    The register.
 * @param   uValue  The value to store.
 */
DECL_FORCE_INLINE(void) iemGRegStoreU16(PVMCPUCC pVCpu, uint8_t iReg, uint16_t uValue) RT_NOEXCEPT
{
    Assert(iReg < 16);
    pVCpu->cpum.GstCtx.aGRegs[iReg].u16 = uValue;
}


/**
 * Stores a 32-bit value to a general purpose register, implicitly clearing
 * the high half of the register.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iReg    The register.
 * @param   uValue  The value to store.
 */
DECL_FORCE_INLINE(void) iemGRegStoreU32(PVMCPUCC pVCpu, uint8_t iReg, uint32_t uValue) RT_NOEXCEPT
{
    Assert(iReg < 16);
    pVCpu->cpum.GstCtx.aGRegs[iReg].u64 = uValue;
}


/**
 * Stores a 64-bit value to a general purpose register.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iReg    The register.
 * @param   uValue  The value to store.
 */
DECL_FORCE_INLINE(void) iemGRegStoreU64(PVMCPUCC pVCpu, uint8_t iReg, uint64_t uValue) RT_NOEXCEPT
{
    Assert(iReg < 16);
    pVCpu->cpum.GstCtx.aGRegs[iReg].u64 = uValue;
}


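/*
 * Illustration of the different widening semantics (hypothetical values):
 * 16-bit stores leave the upper register bits untouched, whereas 32-bit
 * stores zero-extend to the full 64 bits, matching x86 behaviour.
 *
 * @code
 *     // assume rax = 0xffffffffffffffff beforehand:
 *     iemGRegStoreU16(pVCpu, X86_GREG_xAX, 0x1234); // rax = 0xffffffffffff1234
 *     iemGRegStoreU32(pVCpu, X86_GREG_xAX, 0x1234); // rax = 0x0000000000001234
 * @endcode
 */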
/**
 * Get the address of the top of the stack.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
DECL_FORCE_INLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu) RT_NOEXCEPT
{
    if (IEM_IS_64BIT_CODE(pVCpu))
        return pVCpu->cpum.GstCtx.rsp;
    if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
        return pVCpu->cpum.GstCtx.esp;
    return pVCpu->cpum.GstCtx.sp;
}


/**
 * Updates the RIP/EIP/IP to point to the next instruction.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr The number of bytes to add.
 */
DECL_FORCE_INLINE(void) iemRegAddToRip(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
{
    /*
     * Advance RIP.
     *
     * When we're targeting 8086/8, 80186/8 or 80286 mode the updates are 16-bit,
     * while in all other modes except LM64 the updates are 32-bit.  This means
     * we need to watch for both 32-bit and 16-bit "carry" situations, i.e.
     * 4GB and 64KB rollovers, and decide whether anything needs masking.
     *
     * See PC wrap around tests in bs3-cpu-weird-1.
     */
    uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
    uint64_t const uRipNext = uRipPrev + cbInstr;
    if (RT_LIKELY(   !((uRipNext ^ uRipPrev) & (RT_BIT_64(32) | RT_BIT_64(16)))
                  || IEM_IS_64BIT_CODE(pVCpu)))
        pVCpu->cpum.GstCtx.rip = uRipNext;
    else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
        pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
    else
        pVCpu->cpum.GstCtx.rip = (uint16_t)uRipNext;
}


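/*
 * Worked example of the XOR rollover test above (illustrative): the addition
 * only needs masking when it carried across bit 16 or bit 32, which is
 * exactly when those bits differ between the old and new value.
 *
 * @code
 *     uRipPrev = 0x0000fffe, cbInstr = 4  ->  uRipNext = 0x00010002
 *     (uRipNext ^ uRipPrev) & (RT_BIT_64(32) | RT_BIT_64(16))  !=  0
 *         -> 64KB rollover: mask to 16 or 32 bits unless in 64-bit mode
 * @endcode
 */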
/**
 * Called by iemRegAddToRipAndFinishingClearingRF and others when any of the
 * following EFLAGS bits are set:
 *      - X86_EFL_RF - clear it.
 *      - CPUMCTX_INHIBIT_SHADOW (_SS/_STI) - clear them.
 *      - X86_EFL_TF - generate single step \#DB trap.
 *      - CPUMCTX_DBG_HIT_DR0/1/2/3 - generate \#DB trap (data or I/O, not
 *        instruction).
 *
 * According to @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events},
 * a \#DB due to TF (single stepping) or a DRx non-instruction breakpoint
 * takes priority over both NMIs and hardware interrupts.  So, neither is
 * considered here.  (The RESET, \#MC, SMI, INIT, STOPCLK and FLUSH events are
 * either unsupported or will be triggered on top of any \#DB raised here.)
 *
 * The RF flag only needs to be cleared here as it only suppresses instruction
 * breakpoints which are not raised here (happens synchronously during
 * instruction fetching).
 *
 * The CPUMCTX_INHIBIT_SHADOW_SS flag will be cleared by this function, so its
 * status has no bearing on whether \#DB exceptions are raised.
 *
 * @note This must *NOT* be called by the two instructions setting the
 *       CPUMCTX_INHIBIT_SHADOW_SS flag.
 *
 * @see  @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events}
 * @see  @sdmv3{077,200,6.8.3,Masking Exceptions and Interrupts When Switching
 *       Stacks}
 */
template<uint32_t const a_fTF = X86_EFL_TF>
static VBOXSTRICTRC iemFinishInstructionWithFlagsSet(PVMCPUCC pVCpu, int rcNormal) RT_NOEXCEPT
{
    /*
     * Normally we're just here to clear RF and/or interrupt shadow bits.
     */
    if (RT_LIKELY((pVCpu->cpum.GstCtx.eflags.uBoth & (a_fTF | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) == 0))
        pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
    else
    {
        /*
         * Raise a #DB and/or DBGF event.
         */
        VBOXSTRICTRC rcStrict;
        if (pVCpu->cpum.GstCtx.eflags.uBoth & (a_fTF | CPUMCTX_DBG_HIT_DRX_MASK))
        {
            IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
            pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
            if (pVCpu->cpum.GstCtx.eflags.uBoth & a_fTF)
                pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS;
            pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
                                     >> CPUMCTX_DBG_HIT_DRX_SHIFT;
            LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64\n",
                         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
                         pVCpu->cpum.GstCtx.rflags.uBoth));

            pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK);
            rcStrict = iemRaiseDebugException(pVCpu);

            /* A DBGF event/breakpoint trumps the iemRaiseDebugException informational status code. */
            if ((pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK) && RT_FAILURE(rcStrict))
            {
                rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
                LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
            }
        }
        else
        {
            Assert(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK);
            rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
            LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
        }
        pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_DBG_DBGF_MASK;
        Assert(rcStrict != VINF_SUCCESS);
        return rcStrict;
    }
    return rcNormal;
}


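/*
 * Illustrative summary of the #DB path above: DR6.BS is set for single
 * stepping and the DRx hit bits are copied in from the non-silent
 * CPUMCTX_DBG_HIT_DRX bits before calling iemRaiseDebugException(), roughly:
 *
 * @code
 *     dr6 = (dr6 & ~X86_DR6_B_MASK) | X86_DR6_BS (if TF)
 *         | ((eflags & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT) >> CPUMCTX_DBG_HIT_DRX_SHIFT);
 * @endcode
 *
 * Pending DBGF breakpoints/events then override the return status with
 * VINF_EM_DBG_BREAKPOINT or VINF_EM_DBG_EVENT.
 */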
/**
 * Clears the RF and CPUMCTX_INHIBIT_SHADOW, triggering \#DB if pending.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   rcNormal    VINF_SUCCESS to continue TB.
 *                      VINF_IEM_REEXEC_BREAK to force TB exit when
 *                      taking the wrong conditional branch.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegFinishClearingRF(PVMCPUCC pVCpu, int rcNormal) RT_NOEXCEPT
{
    /*
     * We assume that most of the time nothing actually needs doing here.
     */
    AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
    if (RT_LIKELY(!(  pVCpu->cpum.GstCtx.eflags.uBoth
                    & (X86_EFL_TF | X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) ))
        return rcNormal;
    return iemFinishInstructionWithFlagsSet(pVCpu, rcNormal);
}


/**
 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF
 * and CPUMCTX_INHIBIT_SHADOW.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr The number of bytes to add.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
{
    iemRegAddToRip(pVCpu, cbInstr);
    return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
}


/**
 * Updates the RIP to point to the next instruction and clears EFLAGS.RF
 * and CPUMCTX_INHIBIT_SHADOW.
 *
 * Only called from 64-bit code.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr     The number of bytes to add.
 * @param   rcNormal    VINF_SUCCESS to continue TB.
 *                      VINF_IEM_REEXEC_BREAK to force TB exit when
 *                      taking the wrong conditional branch.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRip64AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
{
    pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rip + cbInstr;
    return iemRegFinishClearingRF(pVCpu, rcNormal);
}


/**
 * Updates the EIP to point to the next instruction and clears EFLAGS.RF and
 * CPUMCTX_INHIBIT_SHADOW.
 *
 * This is never called from 64-bit code.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr     The number of bytes to add.
 * @param   rcNormal    VINF_SUCCESS to continue TB.
 *                      VINF_IEM_REEXEC_BREAK to force TB exit when
 *                      taking the wrong conditional branch.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToEip32AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
{
    pVCpu->cpum.GstCtx.rip = (uint32_t)(pVCpu->cpum.GstCtx.eip + cbInstr);
    return iemRegFinishClearingRF(pVCpu, rcNormal);
}


/**
 * Updates the IP to point to the next instruction and clears EFLAGS.RF and
 * CPUMCTX_INHIBIT_SHADOW.
 *
 * This is only ever used from 16-bit code on a pre-386 CPU.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr     The number of bytes to add.
 * @param   rcNormal    VINF_SUCCESS to continue TB.
 *                      VINF_IEM_REEXEC_BREAK to force TB exit when
 *                      taking the wrong conditional branch.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToIp16AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
{
    pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr);
    return iemRegFinishClearingRF(pVCpu, rcNormal);
}


/**
 * Tail method for a finish function that doesn't clear flags or raise \#DB.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   rcNormal    VINF_SUCCESS to continue TB.
 *                      VINF_IEM_REEXEC_BREAK to force TB exit when
 *                      taking the wrong conditional branch.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegFinishNoFlags(PVMCPUCC pVCpu, int rcNormal) RT_NOEXCEPT
{
    AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
    Assert(!(  pVCpu->cpum.GstCtx.eflags.uBoth
             & (X86_EFL_TF | X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) );
    RT_NOREF(pVCpu);
    return rcNormal;
}


/**
 * Updates the RIP to point to the next instruction, but does not need to clear
 * EFLAGS.RF or CPUMCTX_INHIBIT_SHADOW nor check for debug flags.
 *
 * Only called from 64-bit code.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr     The number of bytes to add.
 * @param   rcNormal    VINF_SUCCESS to continue TB.
 *                      VINF_IEM_REEXEC_BREAK to force TB exit when
 *                      taking the wrong conditional branch.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRip64AndFinishingNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
{
    pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rip + cbInstr;
    return iemRegFinishNoFlags(pVCpu, rcNormal);
}


/**
 * Updates the EIP to point to the next instruction, but does not need to clear
 * EFLAGS.RF or CPUMCTX_INHIBIT_SHADOW nor check for debug flags.
 *
 * This is never called from 64-bit code.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr     The number of bytes to add.
 * @param   rcNormal    VINF_SUCCESS to continue TB.
 *                      VINF_IEM_REEXEC_BREAK to force TB exit when
 *                      taking the wrong conditional branch.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToEip32AndFinishingNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
{
    pVCpu->cpum.GstCtx.rip = (uint32_t)(pVCpu->cpum.GstCtx.eip + cbInstr);
    return iemRegFinishNoFlags(pVCpu, rcNormal);
}


/**
 * Updates the IP to point to the next instruction, but does not need to clear
 * EFLAGS.RF or CPUMCTX_INHIBIT_SHADOW nor check for debug flags.
 *
 * This is only ever used from 16-bit code on a pre-386 CPU.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr     The number of bytes to add.
 * @param   rcNormal    VINF_SUCCESS to continue TB.
 *                      VINF_IEM_REEXEC_BREAK to force TB exit when
 *                      taking the wrong conditional branch.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToIp16AndFinishingNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
{
    pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr);
    return iemRegFinishNoFlags(pVCpu, rcNormal);
}


/**
 * Adds an 8-bit signed jump offset to RIP from 64-bit code.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   enmEffOpSize    Effective operand size.
 * @param   rcNormal        VINF_SUCCESS to continue TB.
 *                          VINF_IEM_REEXEC_BREAK to force TB exit when
 *                          taking the wrong conditional branch.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
                                                                             IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
{
    Assert(IEM_IS_64BIT_CODE(pVCpu));
    Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);

    uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
    if (enmEffOpSize == IEMMODE_16BIT)
        uNewRip &= UINT16_MAX;

    if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
        pVCpu->cpum.GstCtx.rip = uNewRip;
    else
        return iemRaiseGeneralProtectionFault0(pVCpu);

#ifndef IEM_WITH_CODE_TLB
    iemOpcodeFlushLight(pVCpu, cbInstr);
#endif

    /*
     * Clear RF and finish the instruction (maybe raise #DB).
     */
    return iemRegFinishClearingRF(pVCpu, rcNormal);
}


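/*
 * Note (illustrative, assuming the usual 48-bit linear addressing):
 * IEM_IS_CANONICAL() accepts only addresses whose bits 63:47 are a sign
 * extension of bit 47, e.g.:
 *
 * @code
 *     0x00007fffffffffff  canonical        0x0000800000000000  -> #GP(0)
 *     0xffff800000000000  canonical        0xfffe000000000000  -> #GP(0)
 * @endcode
 */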
/**
 * Adds an 8-bit signed jump offset to RIP from 64-bit code when the caller is
 * sure it stays within the same page.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   enmEffOpSize    Effective operand size.
 * @param   rcNormal        VINF_SUCCESS to continue TB.
 *                          VINF_IEM_REEXEC_BREAK to force TB exit when
 *                          taking the wrong conditional branch.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC)
iemRegRip64RelativeJumpS8IntraPgAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
                                                    IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
{
    Assert(IEM_IS_64BIT_CODE(pVCpu));
    Assert(enmEffOpSize == IEMMODE_64BIT); RT_NOREF(enmEffOpSize);

    uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
    Assert((pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
    pVCpu->cpum.GstCtx.rip = uNewRip;

#ifndef IEM_WITH_CODE_TLB
    iemOpcodeFlushLight(pVCpu, cbInstr);
#endif

    /*
     * Clear RF and finish the instruction (maybe raise #DB).
     */
    return iemRegFinishClearingRF(pVCpu, rcNormal);
}


/**
 * Adds an 8-bit signed jump offset to EIP, on 386 or later from 16-bit or
 * 32-bit code (never 64-bit).
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   enmEffOpSize    Effective operand size.
 * @param   rcNormal        VINF_SUCCESS to continue TB.
 *                          VINF_IEM_REEXEC_BREAK to force TB exit when
 *                          taking the wrong conditional branch.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
                                                                             IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
{
    Assert(!IEM_IS_64BIT_CODE(pVCpu));
    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);

    uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
    if (enmEffOpSize == IEMMODE_16BIT)
        uNewEip &= UINT16_MAX;
    if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
        pVCpu->cpum.GstCtx.rip = uNewEip;
    else
        return iemRaiseGeneralProtectionFault0(pVCpu);

#ifndef IEM_WITH_CODE_TLB
    iemOpcodeFlushLight(pVCpu, cbInstr);
#endif

    /*
     * Clear RF and finish the instruction (maybe raise #DB).
     */
    return iemRegFinishClearingRF(pVCpu, rcNormal);
}


/**
 * Adds an 8-bit signed jump offset to EIP, on 386 or later from FLAT 32-bit
 * code (never 64-bit).
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   enmEffOpSize    Effective operand size.
 * @param   rcNormal        VINF_SUCCESS to continue TB.
 *                          VINF_IEM_REEXEC_BREAK to force TB exit when
 *                          taking the wrong conditional branch.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC)
iemRegEip32RelativeJumpS8FlatAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
                                                 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
{
    Assert(!IEM_IS_64BIT_CODE(pVCpu));
    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);

    uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
    if (enmEffOpSize == IEMMODE_16BIT)
        uNewEip &= UINT16_MAX;
    pVCpu->cpum.GstCtx.rip = uNewEip;

#ifndef IEM_WITH_CODE_TLB
    iemOpcodeFlushLight(pVCpu, cbInstr);
#endif

    /*
     * Clear RF and finish the instruction (maybe raise #DB).
     */
    return iemRegFinishClearingRF(pVCpu, rcNormal);
}


/**
 * Adds an 8-bit signed jump offset to IP, on a pre-386 CPU.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   rcNormal        VINF_SUCCESS to continue TB.
 *                          VINF_IEM_REEXEC_BREAK to force TB exit when
 *                          taking the wrong conditional branch.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegIp16RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
                                                                            int8_t offNextInstr, int rcNormal) RT_NOEXCEPT
{
    Assert(!IEM_IS_64BIT_CODE(pVCpu));

    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
    if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
        pVCpu->cpum.GstCtx.rip = uNewIp;
    else
        return iemRaiseGeneralProtectionFault0(pVCpu);

#ifndef IEM_WITH_CODE_TLB
    iemOpcodeFlushLight(pVCpu, cbInstr);
#endif

    /*
     * Clear RF and finish the instruction (maybe raise #DB).
     */
    return iemRegFinishClearingRF(pVCpu, rcNormal);
}


/**
 * Adds an 8-bit signed jump offset to RIP from 64-bit code, no checking or
 * clearing of flags.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   enmEffOpSize    Effective operand size.
 * @param   rcNormal        VINF_SUCCESS to continue TB.
 *                          VINF_IEM_REEXEC_BREAK to force TB exit when
 *                          taking the wrong conditional branch.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
                                                                          IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
{
    Assert(IEM_IS_64BIT_CODE(pVCpu));
    Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);

    uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
    if (enmEffOpSize == IEMMODE_16BIT)
        uNewRip &= UINT16_MAX;

    if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
        pVCpu->cpum.GstCtx.rip = uNewRip;
    else
        return iemRaiseGeneralProtectionFault0(pVCpu);

#ifndef IEM_WITH_CODE_TLB
    iemOpcodeFlushLight(pVCpu, cbInstr);
#endif
    return iemRegFinishNoFlags(pVCpu, rcNormal);
}


/**
 * Adds an 8-bit signed jump offset to RIP from 64-bit code when caller is sure
 * it stays within the same page, no checking or clearing of flags.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   enmEffOpSize    Effective operand size.
 * @param   rcNormal        VINF_SUCCESS to continue TB.
 *                          VINF_IEM_REEXEC_BREAK to force TB exit when
 *                          taking the wrong conditional branch.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC)
iemRegRip64RelativeJumpS8IntraPgAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
                                                 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
{
    Assert(IEM_IS_64BIT_CODE(pVCpu));
    Assert(enmEffOpSize == IEMMODE_64BIT); RT_NOREF(enmEffOpSize);

    uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
    Assert((pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
    pVCpu->cpum.GstCtx.rip = uNewRip;

#ifndef IEM_WITH_CODE_TLB
    iemOpcodeFlushLight(pVCpu, cbInstr);
#endif
    return iemRegFinishNoFlags(pVCpu, rcNormal);
}


/**
 * Adds an 8-bit signed jump offset to EIP, on 386 or later from 16-bit or
 * 32-bit code (never 64-bit), no checking or clearing of flags.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   enmEffOpSize    Effective operand size.
 * @param   rcNormal        VINF_SUCCESS to continue TB.
 *                          VINF_IEM_REEXEC_BREAK to force TB exit when
 *                          taking the wrong conditional branch.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
                                                                          IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
{
    Assert(!IEM_IS_64BIT_CODE(pVCpu));
    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);

    uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
    if (enmEffOpSize == IEMMODE_16BIT)
        uNewEip &= UINT16_MAX;
    if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
        pVCpu->cpum.GstCtx.rip = uNewEip;
    else
        return iemRaiseGeneralProtectionFault0(pVCpu);

#ifndef IEM_WITH_CODE_TLB
    iemOpcodeFlushLight(pVCpu, cbInstr);
#endif
    return iemRegFinishNoFlags(pVCpu, rcNormal);
}


/**
 * Adds an 8-bit signed jump offset to EIP, on 386 or later from flat 32-bit
 * code (never 64-bit), no checking or clearing of flags.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   enmEffOpSize    Effective operand size.
 * @param   rcNormal        VINF_SUCCESS to continue TB.
 *                          VINF_IEM_REEXEC_BREAK to force TB exit when
 *                          taking the wrong conditional branch.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC)
iemRegEip32RelativeJumpS8FlatAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
                                              IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
{
    Assert(!IEM_IS_64BIT_CODE(pVCpu));
    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);

    uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
    if (enmEffOpSize == IEMMODE_16BIT)
        uNewEip &= UINT16_MAX;
    pVCpu->cpum.GstCtx.rip = uNewEip;

#ifndef IEM_WITH_CODE_TLB
    iemOpcodeFlushLight(pVCpu, cbInstr);
#endif
    return iemRegFinishNoFlags(pVCpu, rcNormal);
}


/**
 * Adds an 8-bit signed jump offset to IP, on a pre-386 CPU, no checking or
 * clearing of flags.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   rcNormal        VINF_SUCCESS to continue TB.
 *                          VINF_IEM_REEXEC_BREAK to force TB exit when
 *                          taking the wrong conditional branch.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegIp16RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
                                                                         int8_t offNextInstr, int rcNormal) RT_NOEXCEPT
{
    Assert(!IEM_IS_64BIT_CODE(pVCpu));

    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
    if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
        pVCpu->cpum.GstCtx.rip = uNewIp;
    else
        return iemRaiseGeneralProtectionFault0(pVCpu);

#ifndef IEM_WITH_CODE_TLB
    iemOpcodeFlushLight(pVCpu, cbInstr);
#endif
    return iemRegFinishNoFlags(pVCpu, rcNormal);
}


/**
 * Adds a 16-bit signed jump offset to RIP from 64-bit code.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   rcNormal        VINF_SUCCESS to continue TB.
 *                          VINF_IEM_REEXEC_BREAK to force TB exit when
 *                          taking the wrong conditional branch.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
                                                                              int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
{
    Assert(IEM_IS_64BIT_CODE(pVCpu));

    pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr);

#ifndef IEM_WITH_CODE_TLB
    iemOpcodeFlushLight(pVCpu, cbInstr);
#endif

    /*
     * Clear RF and finish the instruction (maybe raise #DB).
     */
    return iemRegFinishClearingRF(pVCpu, rcNormal);
}


/**
 * Adds a 16-bit signed jump offset to EIP from 16-bit or 32-bit code.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   rcNormal        VINF_SUCCESS to continue TB.
 *                          VINF_IEM_REEXEC_BREAK to force TB exit when
 *                          taking the wrong conditional branch.
 *
 * @note    This is also used by 16-bit code in pre-386 mode, as the code is
 *          identical.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
                                                                              int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
{
    Assert(!IEM_IS_64BIT_CODE(pVCpu));

    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
    if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
        pVCpu->cpum.GstCtx.rip = uNewIp;
    else
        return iemRaiseGeneralProtectionFault0(pVCpu);

#ifndef IEM_WITH_CODE_TLB
    iemOpcodeFlushLight(pVCpu, cbInstr);
#endif

    /*
     * Clear RF and finish the instruction (maybe raise #DB).
     */
    return iemRegFinishClearingRF(pVCpu, rcNormal);
}


/**
 * Adds a 16-bit signed jump offset to EIP from FLAT 32-bit code.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   rcNormal        VINF_SUCCESS to continue TB.
 *                          VINF_IEM_REEXEC_BREAK to force TB exit when
 *                          taking the wrong conditional branch.
 *
 * @note    This is also used by 16-bit code in pre-386 mode, as the code is
 *          identical.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16FlatAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
                                                                                  int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
{
    Assert(!IEM_IS_64BIT_CODE(pVCpu));

    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
    pVCpu->cpum.GstCtx.rip = uNewIp;

#ifndef IEM_WITH_CODE_TLB
    iemOpcodeFlushLight(pVCpu, cbInstr);
#endif

    /*
     * Clear RF and finish the instruction (maybe raise #DB).
     */
    return iemRegFinishClearingRF(pVCpu, rcNormal);
}


/**
 * Adds a 16-bit signed jump offset to RIP from 64-bit code, no checking or
 * clearing of flags.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   rcNormal        VINF_SUCCESS to continue TB.
 *                          VINF_IEM_REEXEC_BREAK to force TB exit when
 *                          taking the wrong conditional branch.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
                                                                           int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
{
    Assert(IEM_IS_64BIT_CODE(pVCpu));

    pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr);

#ifndef IEM_WITH_CODE_TLB
    iemOpcodeFlushLight(pVCpu, cbInstr);
#endif
    return iemRegFinishNoFlags(pVCpu, rcNormal);
}


/**
 * Adds a 16-bit signed jump offset to EIP from 16-bit or 32-bit code,
 * no checking or clearing of flags.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   rcNormal        VINF_SUCCESS to continue TB.
 *                          VINF_IEM_REEXEC_BREAK to force TB exit when
 *                          taking the wrong conditional branch.
 *
 * @note    This is also used by 16-bit code in pre-386 mode, as the code is
 *          identical.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
                                                                           int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
{
    Assert(!IEM_IS_64BIT_CODE(pVCpu));

    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
    if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
        pVCpu->cpum.GstCtx.rip = uNewIp;
    else
        return iemRaiseGeneralProtectionFault0(pVCpu);

#ifndef IEM_WITH_CODE_TLB
    iemOpcodeFlushLight(pVCpu, cbInstr);
#endif
    return iemRegFinishNoFlags(pVCpu, rcNormal);
}


/**
 * Adds a 16-bit signed jump offset to EIP from FLAT 32-bit code, no checking
 * or clearing of flags.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   rcNormal        VINF_SUCCESS to continue TB.
 *                          VINF_IEM_REEXEC_BREAK to force TB exit when
 *                          taking the wrong conditional branch.
 *
 * @note    This is also used by 16-bit code in pre-386 mode, as the code is
 *          identical.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16FlatAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
                                                                               int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
{
    Assert(!IEM_IS_64BIT_CODE(pVCpu));

    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
    pVCpu->cpum.GstCtx.rip = uNewIp;

#ifndef IEM_WITH_CODE_TLB
    iemOpcodeFlushLight(pVCpu, cbInstr);
#endif
    return iemRegFinishNoFlags(pVCpu, rcNormal);
}


1549/**
1550 * Adds a 32-bit signed jump offset to RIP from 64-bit code.
1551 *
1552 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
1553 * segment limit.
1554 *
1555 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
1556 * only alternative for relative jumps in 64-bit code and that is already
1557 * handled in the decoder stage.
1558 *
1559 * @returns Strict VBox status code.
1560 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1561 * @param cbInstr Instruction size.
1562 * @param offNextInstr The offset of the next instruction.
1563 * @param rcNormal VINF_SUCCESS to continue TB.
1564 * VINF_IEM_REEXEC_BREAK to force TB exit when
1565 *                          taking the wrong conditional branch.
1566 */
1567DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
1568 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
1569{
1570 Assert(IEM_IS_64BIT_CODE(pVCpu));
1571
1572 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
1573 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
1574 pVCpu->cpum.GstCtx.rip = uNewRip;
1575 else
1576 return iemRaiseGeneralProtectionFault0(pVCpu);
1577
1578#ifndef IEM_WITH_CODE_TLB
1579 iemOpcodeFlushLight(pVCpu, cbInstr);
1580#endif
1581
1582 /*
1583 * Clear RF and finish the instruction (maybe raise #DB).
1584 */
1585 return iemRegFinishClearingRF(pVCpu, rcNormal);
1586}
1587
1588
1589/**
1590 * Adds a 32-bit signed jump offset to RIP from 64-bit code when the caller is
1591 * sure the target is in the same page.
1592 *
1593 * No canonical or limit checks are performed; the caller guarantees (and
1594 * the code asserts) that the target lies within the same guest page.
1595 *
1596 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
1597 * only alternative for relative jumps in 64-bit code and that is already
1598 * handled in the decoder stage.
1599 *
1600 * @returns Strict VBox status code.
1601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1602 * @param cbInstr Instruction size.
1603 * @param offNextInstr The offset of the next instruction.
1604 * @param rcNormal VINF_SUCCESS to continue TB.
1605 * VINF_IEM_REEXEC_BREAK to force TB exit when
1606 *                          taking the wrong conditional branch.
1607 */
1608DECL_FORCE_INLINE(VBOXSTRICTRC)
1609iemRegRip64RelativeJumpS32IntraPgAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
1610 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
1611{
1612 Assert(IEM_IS_64BIT_CODE(pVCpu));
1613
1614 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
1615 Assert((pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
1616 pVCpu->cpum.GstCtx.rip = uNewRip;
1617
1618#ifndef IEM_WITH_CODE_TLB
1619 iemOpcodeFlushLight(pVCpu, cbInstr);
1620#endif
1621
1622 /*
1623 * Clear RF and finish the instruction (maybe raise #DB).
1624 */
1625 return iemRegFinishClearingRF(pVCpu, rcNormal);
1626}
1627
1628
1629/**
1630 * Adds a 32-bit signed jump offset to EIP from 32-bit code.
1631 *
1632 * May raise a \#GP(0) if the new EIP is outside the code segment
1633 * limit.
1634 *
1635 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
1636 * only alternative for relative jumps in 32-bit code and that is already
1637 * handled in the decoder stage.
1638 *
1639 * @returns Strict VBox status code.
1640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1641 * @param cbInstr Instruction size.
1642 * @param offNextInstr The offset of the next instruction.
1643 * @param rcNormal VINF_SUCCESS to continue TB.
1644 * VINF_IEM_REEXEC_BREAK to force TB exit when
1645 *                          taking the wrong conditional branch.
1646 */
1647DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
1648 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
1649{
1650 Assert(!IEM_IS_64BIT_CODE(pVCpu));
1651 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
1652
1653 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
1654 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
1655 pVCpu->cpum.GstCtx.rip = uNewEip;
1656 else
1657 return iemRaiseGeneralProtectionFault0(pVCpu);
1658
1659#ifndef IEM_WITH_CODE_TLB
1660 iemOpcodeFlushLight(pVCpu, cbInstr);
1661#endif
1662
1663 /*
1664 * Clear RF and finish the instruction (maybe raise #DB).
1665 */
1666 return iemRegFinishClearingRF(pVCpu, rcNormal);
1667}
1668
1669
1670/**
1671 * Adds a 32-bit signed jump offset to EIP from FLAT 32-bit code.
1672 *
1673 * No \#GP(0) checks are needed here: the segment is flat, so any 32-bit
1674 * EIP is within the limit.
1675 *
1676 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
1677 * only alternative for relative jumps in 32-bit code and that is already
1678 * handled in the decoder stage.
1679 *
1680 * @returns Strict VBox status code.
1681 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1682 * @param cbInstr Instruction size.
1683 * @param offNextInstr The offset of the next instruction.
1684 * @param rcNormal VINF_SUCCESS to continue TB.
1685 *                          taking the wrong conditional branch.
1686 * taking the wrong conditional branhc.
1687 */
1688DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32FlatAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
1689 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
1690{
1691 Assert(!IEM_IS_64BIT_CODE(pVCpu));
1692 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
1693
1694 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
1695 pVCpu->cpum.GstCtx.rip = uNewEip;
1696
1697#ifndef IEM_WITH_CODE_TLB
1698 iemOpcodeFlushLight(pVCpu, cbInstr);
1699#endif
1700
1701 /*
1702 * Clear RF and finish the instruction (maybe raise #DB).
1703 */
1704 return iemRegFinishClearingRF(pVCpu, rcNormal);
1705}
1706
1707
1708
1709/**
1710 * Adds a 32-bit signed jump offset to RIP from 64-bit code, no checking or
1711 * clearing of flags.
1712 *
1713 * May raise a \#GP(0) if the new RIP is non-canonical; there is no code
1714 * segment limit check in 64-bit mode.
1715 *
1716 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
1717 * only alternative for relative jumps in 64-bit code and that is already
1718 * handled in the decoder stage.
1719 *
1720 * @returns Strict VBox status code.
1721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1722 * @param cbInstr Instruction size.
1723 * @param offNextInstr The offset of the next instruction.
1724 * @param rcNormal VINF_SUCCESS to continue TB.
1725 * VINF_IEM_REEXEC_BREAK to force TB exit when
1726 *                          taking the wrong conditional branch.
1727 */
1728DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
1729 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
1730{
1731 Assert(IEM_IS_64BIT_CODE(pVCpu));
1732
1733 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
1734 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
1735 pVCpu->cpum.GstCtx.rip = uNewRip;
1736 else
1737 return iemRaiseGeneralProtectionFault0(pVCpu);
1738
1739#ifndef IEM_WITH_CODE_TLB
1740 iemOpcodeFlushLight(pVCpu, cbInstr);
1741#endif
1742 return iemRegFinishNoFlags(pVCpu, rcNormal);
1743}
1744
1745
1746/**
1747 * Adds a 32-bit signed jump offset to RIP from 64-bit code when the caller is
1748 * sure it stays within the same page, no checking or clearing of flags.
1749 *
1750 * No canonical or limit checks are performed; the caller guarantees (and
1751 * the code asserts) that the target lies within the same guest page.
1752 *
1753 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
1754 * only alternative for relative jumps in 64-bit code and that is already
1755 * handled in the decoder stage.
1756 *
1757 * @returns Strict VBox status code.
1758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1759 * @param cbInstr Instruction size.
1760 * @param offNextInstr The offset of the next instruction.
1761 * @param rcNormal VINF_SUCCESS to continue TB.
1762 * VINF_IEM_REEXEC_BREAK to force TB exit when
1763 *                          taking the wrong conditional branch.
1764 */
1765DECL_FORCE_INLINE(VBOXSTRICTRC)
1766iemRegRip64RelativeJumpS32IntraPgAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
1767{
1768 Assert(IEM_IS_64BIT_CODE(pVCpu));
1769
1770 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
1771 Assert((pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
1772 pVCpu->cpum.GstCtx.rip = uNewRip;
1773
1774#ifndef IEM_WITH_CODE_TLB
1775 iemOpcodeFlushLight(pVCpu, cbInstr);
1776#endif
1777 return iemRegFinishNoFlags(pVCpu, rcNormal);
1778}
1779
1780
1781/**
1782 * Adds a 32-bit signed jump offset to EIP from 32-bit code, no checking or
1783 * clearing of flags.
1784 *
1785 * May raise a \#GP(0) if the new EIP is outside the code segment
1786 * limit.
1787 *
1788 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
1789 * only alternative for relative jumps in 32-bit code and that is already
1790 * handled in the decoder stage.
1791 *
1792 * @returns Strict VBox status code.
1793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1794 * @param cbInstr Instruction size.
1795 * @param offNextInstr The offset of the next instruction.
1796 * @param rcNormal VINF_SUCCESS to continue TB.
1797 * VINF_IEM_REEXEC_BREAK to force TB exit when
1798 *                          taking the wrong conditional branch.
1799 */
1800DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
1801 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
1802{
1803 Assert(!IEM_IS_64BIT_CODE(pVCpu));
1804 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
1805
1806 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
1807 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
1808 pVCpu->cpum.GstCtx.rip = uNewEip;
1809 else
1810 return iemRaiseGeneralProtectionFault0(pVCpu);
1811
1812#ifndef IEM_WITH_CODE_TLB
1813 iemOpcodeFlushLight(pVCpu, cbInstr);
1814#endif
1815 return iemRegFinishNoFlags(pVCpu, rcNormal);
1816}
1817
1818
1819/**
1820 * Adds a 32-bit signed jump offset to EIP from FLAT 32-bit code, no checking or
1821 * clearing of flags.
1822 *
1823 * No \#GP(0) checks are needed here: the segment is flat, so any 32-bit
1824 * EIP is within the limit.
1825 *
1826 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
1827 * only alternative for relative jumps in 32-bit code and that is already
1828 * handled in the decoder stage.
1829 *
1830 * @returns Strict VBox status code.
1831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1832 * @param cbInstr Instruction size.
1833 * @param offNextInstr The offset of the next instruction.
1834 * @param rcNormal VINF_SUCCESS to continue TB.
1835 * VINF_IEM_REEXEC_BREAK to force TB exit when
1836 *                          taking the wrong conditional branch.
1837 */
1838DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32FlatAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
1839 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
1840{
1841 Assert(!IEM_IS_64BIT_CODE(pVCpu));
1842 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
1843
1844 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
1845 pVCpu->cpum.GstCtx.rip = uNewEip;
1846
1847#ifndef IEM_WITH_CODE_TLB
1848 iemOpcodeFlushLight(pVCpu, cbInstr);
1849#endif
1850 return iemRegFinishNoFlags(pVCpu, rcNormal);
1851}
1852
1853
1854/**
1855 * Extended version of iemFinishInstructionWithFlagsSet that goes with
1856 * iemRegAddToRipAndFinishingClearingRfEx.
1857 *
1858 * See iemFinishInstructionWithFlagsSet() for details.
1859 */
1860static VBOXSTRICTRC iemFinishInstructionWithTfSet(PVMCPUCC pVCpu) RT_NOEXCEPT
1861{
1862 /*
1863 * Raise a #DB.
1864 */
1865 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
1866 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
1867 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS
1868 | ( (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
1869 >> CPUMCTX_DBG_HIT_DRX_SHIFT);
1870 /** @todo Do we set all pending \#DB events, or just one? */
1871 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64 (popf)\n",
1872 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
1873 pVCpu->cpum.GstCtx.rflags.uBoth));
1874 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
1875 return iemRaiseDebugException(pVCpu);
1876}
1877
1878
1879/**
1880 * Extended version of iemRegAddToRipAndFinishingClearingRF for use by POPF and
1881 * others potentially updating EFLAGS.TF.
1882 *
1883 * The single step event must be generated using the TF value at the start of
1884 * the instruction, not the new value set by it.
1885 *
1886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1887 * @param cbInstr The number of bytes to add.
1888 * @param fEflOld The EFLAGS at the start of the instruction
1889 * execution.
1890 */
1891DECLINLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRfEx(PVMCPUCC pVCpu, uint8_t cbInstr, uint32_t fEflOld) RT_NOEXCEPT
1892{
1893 iemRegAddToRip(pVCpu, cbInstr);
1894 if (!(fEflOld & X86_EFL_TF))
1895 {
1896 /* Specialized iemRegFinishClearingRF edition here that doesn't check X86_EFL_TF. */
1897 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
1898 if (RT_LIKELY(!( pVCpu->cpum.GstCtx.eflags.uBoth
1899 & (X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) ))
1900 return VINF_SUCCESS;
1901 return iemFinishInstructionWithFlagsSet<0 /*a_fTF*/>(pVCpu, VINF_SUCCESS); /* TF=0, so ignore it. */
1902 }
1903 return iemFinishInstructionWithTfSet(pVCpu);
1904}
1905
1906
1907#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1908/**
1909 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
1910 *
1911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1912 */
1913DECLINLINE(VBOXSTRICTRC) iemRegUpdateRipAndFinishClearingRF(PVMCPUCC pVCpu) RT_NOEXCEPT
1914{
1915 return iemRegAddToRipAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
1916}
1917#endif
1918
1919
1920#ifdef IEM_WITH_CODE_TLB
1921
1922/**
1923 * Performs a near jump to the specified address, no checking or clearing of
1924 * flags.
1925 *
1926 * May raise a \#GP(0) if the new IP is outside the code segment limit.
1927 *
1928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1929 * @param uNewIp The new IP value.
1930 */
1931DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU16AndFinishNoFlags(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
1932{
1933 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
1934 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
1935 pVCpu->cpum.GstCtx.rip = uNewIp;
1936 else
1937 return iemRaiseGeneralProtectionFault0(pVCpu);
1938 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
1939}
1940
1941
1942/**
1943 * Performs a near jump to the specified address, no checking or clearing of
1944 * flags.
1945 *
1946 * May raise a \#GP(0) if the new EIP is outside the code segment limit.
1947 *
1948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1949 * @param uNewEip The new EIP value.
1950 */
1951DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU32AndFinishNoFlags(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
1952{
1953 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
1954 Assert(!IEM_IS_64BIT_CODE(pVCpu));
1955 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
1956 pVCpu->cpum.GstCtx.rip = uNewEip;
1957 else
1958 return iemRaiseGeneralProtectionFault0(pVCpu);
1959 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
1960}
1961
1962
1963/**
1964 * Performs a near jump to the specified address, no checking or clearing of
1965 * flags.
1966 *
1967 * May raise a \#GP(0) if the new RIP is non-canonical; there is no code
1968 * segment limit check in 64-bit mode.
1969 *
1970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1971 * @param uNewRip The new RIP value.
1972 */
1973DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU64AndFinishNoFlags(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
1974{
1975 Assert(IEM_IS_64BIT_CODE(pVCpu));
1976 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
1977 pVCpu->cpum.GstCtx.rip = uNewRip;
1978 else
1979 return iemRaiseGeneralProtectionFault0(pVCpu);
1980 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
1981}
1982
1983#endif /* IEM_WITH_CODE_TLB */
1984
1985/**
1986 * Performs a near jump to the specified address.
1987 *
1988 * May raise a \#GP(0) if the new IP is outside the code segment limit.
1989 *
1990 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1991 * @param uNewIp The new IP value.
1992 * @param cbInstr The instruction length, for flushing in the non-TLB case.
1993 */
1994DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU16AndFinishClearingRF(PVMCPUCC pVCpu, uint16_t uNewIp, uint8_t cbInstr) RT_NOEXCEPT
1995{
1996 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
1997 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
1998 pVCpu->cpum.GstCtx.rip = uNewIp;
1999 else
2000 return iemRaiseGeneralProtectionFault0(pVCpu);
2001#ifndef IEM_WITH_CODE_TLB
2002 iemOpcodeFlushLight(pVCpu, cbInstr);
2003#else
2004 RT_NOREF_PV(cbInstr);
2005#endif
2006 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2007}
2008
2009
2010/**
2011 * Performs a near jump to the specified address.
2012 *
2013 * May raise a \#GP(0) if the new EIP is outside the code segment limit.
2014 *
2015 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2016 * @param uNewEip The new EIP value.
2017 * @param cbInstr The instruction length, for flushing in the non-TLB case.
2018 */
2019DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU32AndFinishClearingRF(PVMCPUCC pVCpu, uint32_t uNewEip, uint8_t cbInstr) RT_NOEXCEPT
2020{
2021 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
2022 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2023 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2024 pVCpu->cpum.GstCtx.rip = uNewEip;
2025 else
2026 return iemRaiseGeneralProtectionFault0(pVCpu);
2027#ifndef IEM_WITH_CODE_TLB
2028 iemOpcodeFlushLight(pVCpu, cbInstr);
2029#else
2030 RT_NOREF_PV(cbInstr);
2031#endif
2032 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2033}
2034
2035
2036/**
2037 * Performs a near jump to the specified address.
2038 *
2039 * May raise a \#GP(0) if the new RIP is non-canonical; there is no code
2040 * segment limit check in 64-bit mode.
2041 *
2042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2043 * @param uNewRip The new RIP value.
2044 * @param cbInstr The instruction length, for flushing in the non-TLB case.
2045 */
2046DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU64AndFinishClearingRF(PVMCPUCC pVCpu, uint64_t uNewRip, uint8_t cbInstr) RT_NOEXCEPT
2047{
2048 Assert(IEM_IS_64BIT_CODE(pVCpu));
2049 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2050 pVCpu->cpum.GstCtx.rip = uNewRip;
2051 else
2052 return iemRaiseGeneralProtectionFault0(pVCpu);
2053#ifndef IEM_WITH_CODE_TLB
2054 iemOpcodeFlushLight(pVCpu, cbInstr);
2055#else
2056 RT_NOREF_PV(cbInstr);
2057#endif
2058 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2059}
2060
2061
2062/**
2063 * Implements a 16-bit relative call, no checking or clearing of
2064 * flags.
2065 *
2066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2067 * @param cbInstr The instruction length.
2068 * @param offDisp The 16-bit displacement.
2069 */
2070DECL_FORCE_INLINE(VBOXSTRICTRC)
2071iemRegRipRelativeCallS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offDisp) RT_NOEXCEPT
2072{
2073 uint16_t const uOldIp = pVCpu->cpum.GstCtx.ip + cbInstr;
2074 uint16_t const uNewIp = uOldIp + offDisp;
2075 if ( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
2076 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */)
2077 { /* likely */ }
2078 else
2079 return iemRaiseGeneralProtectionFault0(pVCpu);
2080
2081 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldIp);
2082 if (rcStrict == VINF_SUCCESS)
2083 { /* likely */ }
2084 else
2085 return rcStrict;
2086
2087 pVCpu->cpum.GstCtx.rip = uNewIp;
2088#ifndef IEM_WITH_CODE_TLB
2089 iemOpcodeFlushLight(pVCpu, cbInstr);
2090#endif
2091 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
2092}
2093
2094
2095/**
2096 * Implements a 16-bit relative call.
2097 *
2098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2099 * @param cbInstr The instruction length.
2100 * @param offDisp The 16-bit displacement.
2101 */
2102DECL_FORCE_INLINE(VBOXSTRICTRC)
2103iemRegRipRelativeCallS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offDisp) RT_NOEXCEPT
2104{
2105 uint16_t const uOldIp = pVCpu->cpum.GstCtx.ip + cbInstr;
2106 uint16_t const uNewIp = uOldIp + offDisp;
2107 if ( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
2108 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */)
2109 { /* likely */ }
2110 else
2111 return iemRaiseGeneralProtectionFault0(pVCpu);
2112
2113 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldIp);
2114 if (rcStrict == VINF_SUCCESS)
2115 { /* likely */ }
2116 else
2117 return rcStrict;
2118
2119 pVCpu->cpum.GstCtx.rip = uNewIp;
2120#ifndef IEM_WITH_CODE_TLB
2121 iemOpcodeFlushLight(pVCpu, cbInstr);
2122#endif
2123 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2124}
2125
2126
2127/**
2128 * Implements a 32-bit relative call, no checking or clearing of flags.
2129 *
2130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2131 * @param cbInstr The instruction length.
2132 * @param offDisp The 32-bit displacement.
2133 */
2134DECL_FORCE_INLINE(VBOXSTRICTRC)
2135iemRegEip32RelativeCallS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offDisp) RT_NOEXCEPT
2136{
2137 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
2138
2139 uint32_t const uOldRip = pVCpu->cpum.GstCtx.eip + cbInstr;
2140 uint32_t const uNewRip = uOldRip + offDisp;
2141 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
2142 { /* likely */ }
2143 else
2144 return iemRaiseGeneralProtectionFault0(pVCpu);
2145
2146 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldRip);
2147 if (rcStrict == VINF_SUCCESS)
2148 { /* likely */ }
2149 else
2150 return rcStrict;
2151
2152 pVCpu->cpum.GstCtx.rip = uNewRip;
2153#ifndef IEM_WITH_CODE_TLB
2154 iemOpcodeFlushLight(pVCpu, cbInstr);
2155#endif
2156 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
2157}
2158
2159
2160/**
2161 * Implements a 32-bit relative call.
2162 *
2163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2164 * @param cbInstr The instruction length.
2165 * @param offDisp The 32-bit displacement.
2166 */
2167DECL_FORCE_INLINE(VBOXSTRICTRC)
2168iemRegEip32RelativeCallS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offDisp) RT_NOEXCEPT
2169{
2170 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
2171
2172 uint32_t const uOldRip = pVCpu->cpum.GstCtx.eip + cbInstr;
2173 uint32_t const uNewRip = uOldRip + offDisp;
2174 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
2175 { /* likely */ }
2176 else
2177 return iemRaiseGeneralProtectionFault0(pVCpu);
2178
2179 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldRip);
2180 if (rcStrict == VINF_SUCCESS)
2181 { /* likely */ }
2182 else
2183 return rcStrict;
2184
2185 pVCpu->cpum.GstCtx.rip = uNewRip;
2186#ifndef IEM_WITH_CODE_TLB
2187 iemOpcodeFlushLight(pVCpu, cbInstr);
2188#endif
2189 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2190}
2191
2192
2193/**
2194 * Implements a 64-bit relative call, no checking or clearing of flags.
2195 *
2196 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2197 * @param cbInstr The instruction length.
2198 * @param offDisp The 64-bit displacement.
2199 */
2200DECL_FORCE_INLINE(VBOXSTRICTRC)
2201iemRegRip64RelativeCallS64AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int64_t offDisp) RT_NOEXCEPT
2202{
2203 uint64_t const uOldRip = pVCpu->cpum.GstCtx.rip + cbInstr;
2204 uint64_t const uNewRip = uOldRip + (int64_t)offDisp;
2205 if (IEM_IS_CANONICAL(uNewRip))
2206 { /* likely */ }
2207 else
2208 return iemRaiseNotCanonical(pVCpu);
2209
2210 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldRip);
2211 if (rcStrict == VINF_SUCCESS)
2212 { /* likely */ }
2213 else
2214 return rcStrict;
2215
2216 pVCpu->cpum.GstCtx.rip = uNewRip;
2217#ifndef IEM_WITH_CODE_TLB
2218 iemOpcodeFlushLight(pVCpu, cbInstr);
2219#endif
2220 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
2221}
2222
2223
2224/**
2225 * Implements a 64-bit relative call.
2226 *
2227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2228 * @param cbInstr The instruction length.
2229 * @param offDisp The 64-bit displacement.
2230 */
2231DECL_FORCE_INLINE(VBOXSTRICTRC)
2232iemRegRip64RelativeCallS64AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int64_t offDisp) RT_NOEXCEPT
2233{
2234 uint64_t const uOldRip = pVCpu->cpum.GstCtx.rip + cbInstr;
2235 uint64_t const uNewRip = uOldRip + (int64_t)offDisp;
2236 if (IEM_IS_CANONICAL(uNewRip))
2237 { /* likely */ }
2238 else
2239 return iemRaiseNotCanonical(pVCpu);
2240
2241 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldRip);
2242 if (rcStrict == VINF_SUCCESS)
2243 { /* likely */ }
2244 else
2245 return rcStrict;
2246
2247 pVCpu->cpum.GstCtx.rip = uNewRip;
2248#ifndef IEM_WITH_CODE_TLB
2249 iemOpcodeFlushLight(pVCpu, cbInstr);
2250#endif
2251 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2252}
2253
2254
2255/**
2256 * Implements a 16-bit indirect call, no checking or clearing of
2257 * flags.
2258 *
2259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2260 * @param cbInstr The instruction length.
2261 * @param uNewRip The new RIP value.
2262 */
2263DECL_FORCE_INLINE(VBOXSTRICTRC)
2264iemRegIp16IndirectCallU16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uNewRip) RT_NOEXCEPT
2265{
2266 uint16_t const uOldRip = pVCpu->cpum.GstCtx.ip + cbInstr;
2267 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
2268 { /* likely */ }
2269 else
2270 return iemRaiseGeneralProtectionFault0(pVCpu);
2271
2272 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldRip);
2273 if (rcStrict == VINF_SUCCESS)
2274 { /* likely */ }
2275 else
2276 return rcStrict;
2277
2278 pVCpu->cpum.GstCtx.rip = uNewRip;
2279#ifndef IEM_WITH_CODE_TLB
2280 iemOpcodeFlushLight(pVCpu, cbInstr);
2281#endif
2282 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
2283}
2284
2285
2286/**
2287 * Implements a 16-bit indirect call, no checking or clearing of
2288 * flags.
2289 *
2290 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2291 * @param cbInstr The instruction length.
2292 * @param uNewRip The new RIP value.
2293 */
2294DECL_FORCE_INLINE(VBOXSTRICTRC)
2295iemRegEip32IndirectCallU16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uNewRip) RT_NOEXCEPT
2296{
2297 uint16_t const uOldRip = pVCpu->cpum.GstCtx.ip + cbInstr;
2298 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
2299 { /* likely */ }
2300 else
2301 return iemRaiseGeneralProtectionFault0(pVCpu);
2302
2303 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldRip);
2304 if (rcStrict == VINF_SUCCESS)
2305 { /* likely */ }
2306 else
2307 return rcStrict;
2308
2309 pVCpu->cpum.GstCtx.rip = uNewRip;
2310#ifndef IEM_WITH_CODE_TLB
2311 iemOpcodeFlushLight(pVCpu, cbInstr);
2312#endif
2313 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
2314}
2315
2316
2317/**
2318 * Implements a 16-bit indirect call.
2319 *
2320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2321 * @param cbInstr The instruction length.
2322 * @param uNewRip The new RIP value.
2323 */
2324DECL_FORCE_INLINE(VBOXSTRICTRC)
2325iemRegIp16IndirectCallU16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uNewRip) RT_NOEXCEPT
2326{
2327 uint16_t const uOldRip = pVCpu->cpum.GstCtx.ip + cbInstr;
2328 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
2329 { /* likely */ }
2330 else
2331 return iemRaiseGeneralProtectionFault0(pVCpu);
2332
2333 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldRip);
2334 if (rcStrict == VINF_SUCCESS)
2335 { /* likely */ }
2336 else
2337 return rcStrict;
2338
2339 pVCpu->cpum.GstCtx.rip = uNewRip;
2340#ifndef IEM_WITH_CODE_TLB
2341 iemOpcodeFlushLight(pVCpu, cbInstr);
2342#endif
2343 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2344}
2345
2346
2347/**
2348 * Implements a 16-bit indirect call.
2349 *
2350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2351 * @param cbInstr The instruction length.
2352 * @param uNewRip The new RIP value.
2353 */
2354DECL_FORCE_INLINE(VBOXSTRICTRC)
2355iemRegEip32IndirectCallU16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uNewRip) RT_NOEXCEPT
2356{
2357 uint16_t const uOldRip = pVCpu->cpum.GstCtx.ip + cbInstr;
2358 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
2359 { /* likely */ }
2360 else
2361 return iemRaiseGeneralProtectionFault0(pVCpu);
2362
2363 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldRip);
2364 if (rcStrict == VINF_SUCCESS)
2365 { /* likely */ }
2366 else
2367 return rcStrict;
2368
2369 pVCpu->cpum.GstCtx.rip = uNewRip;
2370#ifndef IEM_WITH_CODE_TLB
2371 iemOpcodeFlushLight(pVCpu, cbInstr);
2372#endif
2373 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2374}
2375
2376
2377/**
2378 * Implements a 32-bit indirect call, no checking or clearing of
2379 * flags.
2380 *
2381 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2382 * @param cbInstr The instruction length.
2383 * @param uNewRip The new RIP value.
2384 */
2385DECL_FORCE_INLINE(VBOXSTRICTRC)
2386iemRegEip32IndirectCallU32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, uint32_t uNewRip) RT_NOEXCEPT
2387{
2388 uint32_t const uOldRip = pVCpu->cpum.GstCtx.eip + cbInstr;
2389 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
2390 { /* likely */ }
2391 else
2392 return iemRaiseGeneralProtectionFault0(pVCpu);
2393
2394 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldRip);
2395 if (rcStrict == VINF_SUCCESS)
2396 { /* likely */ }
2397 else
2398 return rcStrict;
2399
2400 pVCpu->cpum.GstCtx.rip = uNewRip;
2401#ifndef IEM_WITH_CODE_TLB
2402 iemOpcodeFlushLight(pVCpu, cbInstr);
2403#endif
2404 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
2405}
2406
2407
2408/**
2409 * Implements a 32-bit indirect call.
2410 *
2411 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2412 * @param cbInstr The instruction length.
2413 * @param uNewRip The new RIP value.
2414 */
2415DECL_FORCE_INLINE(VBOXSTRICTRC)
2416iemRegEip32IndirectCallU32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, uint32_t uNewRip) RT_NOEXCEPT
2417{
2418 uint32_t const uOldRip = pVCpu->cpum.GstCtx.eip + cbInstr;
2419 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
2420 { /* likely */ }
2421 else
2422 return iemRaiseGeneralProtectionFault0(pVCpu);
2423
2424 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldRip);
2425 if (rcStrict == VINF_SUCCESS)
2426 { /* likely */ }
2427 else
2428 return rcStrict;
2429
2430 pVCpu->cpum.GstCtx.rip = uNewRip;
2431#ifndef IEM_WITH_CODE_TLB
2432 iemOpcodeFlushLight(pVCpu, cbInstr);
2433#endif
2434 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2435}
2436
2437
2438/**
2439 * Implements a 64-bit indirect call, no checking or clearing of
2440 * flags.
2441 *
2442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2443 * @param cbInstr The instruction length.
2444 * @param uNewRip The new RIP value.
2445 */
2446DECL_FORCE_INLINE(VBOXSTRICTRC)
2447iemRegRip64IndirectCallU64AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, uint64_t uNewRip) RT_NOEXCEPT
2448{
2449 uint64_t const uOldRip = pVCpu->cpum.GstCtx.rip + cbInstr;
2450 if (IEM_IS_CANONICAL(uNewRip))
2451 { /* likely */ }
2452 else
2453 return iemRaiseGeneralProtectionFault0(pVCpu);
2454
2455 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldRip);
2456 if (rcStrict == VINF_SUCCESS)
2457 { /* likely */ }
2458 else
2459 return rcStrict;
2460
2461 pVCpu->cpum.GstCtx.rip = uNewRip;
2462#ifndef IEM_WITH_CODE_TLB
2463 iemOpcodeFlushLight(pVCpu, cbInstr);
2464#endif
2465 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
2466}
2467
2468
2469/**
2470 * Implements a 64-bit indirect call.
2471 *
2472 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2473 * @param cbInstr The instruction length.
2474 * @param uNewRip The new RIP value.
2475 */
2476DECL_FORCE_INLINE(VBOXSTRICTRC)
2477iemRegRip64IndirectCallU64AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, uint64_t uNewRip) RT_NOEXCEPT
2478{
2479 uint64_t const uOldRip = pVCpu->cpum.GstCtx.rip + cbInstr;
2480 if (IEM_IS_CANONICAL(uNewRip))
2481 { /* likely */ }
2482 else
2483 return iemRaiseGeneralProtectionFault0(pVCpu);
2484
2485 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldRip);
2486 if (rcStrict == VINF_SUCCESS)
2487 { /* likely */ }
2488 else
2489 return rcStrict;
2490
2491 pVCpu->cpum.GstCtx.rip = uNewRip;
2492#ifndef IEM_WITH_CODE_TLB
2493 iemOpcodeFlushLight(pVCpu, cbInstr);
2494#endif
2495 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2496}
2497
2498
2499
2500/**
2501 * Adds to the stack pointer.
2502 *
2503 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2504 * @param cbToAdd The number of bytes to add (8-bit!).
2505 */
2506DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd) RT_NOEXCEPT
2507{
2508 if (IEM_IS_64BIT_CODE(pVCpu))
2509 pVCpu->cpum.GstCtx.rsp += cbToAdd;
2510 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2511 pVCpu->cpum.GstCtx.esp += cbToAdd;
2512 else
2513 pVCpu->cpum.GstCtx.sp += cbToAdd;
2514}
2515
2516
2517/**
2518 * Subtracts from the stack pointer.
2519 *
2520 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2521 * @param cbToSub The number of bytes to subtract (8-bit!).
2522 */
2523DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub) RT_NOEXCEPT
2524{
2525 if (IEM_IS_64BIT_CODE(pVCpu))
2526 pVCpu->cpum.GstCtx.rsp -= cbToSub;
2527 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2528 pVCpu->cpum.GstCtx.esp -= cbToSub;
2529 else
2530 pVCpu->cpum.GstCtx.sp -= cbToSub;
2531}
2532
2533
2534/**
2535 * Adds to the temporary stack pointer.
2536 *
2537 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2538 * @param pTmpRsp The temporary SP/ESP/RSP to update.
2539 * @param cbToAdd The number of bytes to add (16-bit).
2540 */
2541DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd) RT_NOEXCEPT
2542{
2543 if (IEM_IS_64BIT_CODE(pVCpu))
2544 pTmpRsp->u += cbToAdd;
2545 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2546 pTmpRsp->DWords.dw0 += cbToAdd;
2547 else
2548 pTmpRsp->Words.w0 += cbToAdd;
2549}
2550
2551
2552/**
2553 * Subtracts from the temporary stack pointer.
2554 *
2555 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2556 * @param pTmpRsp The temporary SP/ESP/RSP to update.
2557 * @param cbToSub The number of bytes to subtract.
2558 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
2559 * expecting that.
2560 */
2561DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub) RT_NOEXCEPT
2562{
2563 if (IEM_IS_64BIT_CODE(pVCpu))
2564 pTmpRsp->u -= cbToSub;
2565 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2566 pTmpRsp->DWords.dw0 -= cbToSub;
2567 else
2568 pTmpRsp->Words.w0 -= cbToSub;
2569}
2570
2571
2572/**
2573 * Calculates the effective stack address for a push of the specified size as
2574 * well as the new RSP value (upper bits may be masked).
2575 *
2576 * @returns Effective stack address for the push.
2577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2578 * @param   cbItem              The size of the stack item to push.
2579 * @param puNewRsp Where to return the new RSP value.
2580 */
2581DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
2582{
2583 RTUINT64U uTmpRsp;
2584 RTGCPTR GCPtrTop;
2585 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
2586
2587 if (IEM_IS_64BIT_CODE(pVCpu))
2588 GCPtrTop = uTmpRsp.u -= cbItem;
2589 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2590 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
2591 else
2592 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
2593 *puNewRsp = uTmpRsp.u;
2594 return GCPtrTop;
2595}
2596
2597
2598/**
2599 * Gets the current stack pointer and calculates the value after a pop of the
2600 * specified size.
2601 *
2602 * @returns Current stack pointer.
2603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2604 * @param cbItem The size of the stack item to pop.
2605 * @param puNewRsp Where to return the new RSP value.
2606 */
2607DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
2608{
2609 RTUINT64U uTmpRsp;
2610 RTGCPTR GCPtrTop;
2611 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
2612
2613 if (IEM_IS_64BIT_CODE(pVCpu))
2614 {
2615 GCPtrTop = uTmpRsp.u;
2616 uTmpRsp.u += cbItem;
2617 }
2618 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2619 {
2620 GCPtrTop = uTmpRsp.DWords.dw0;
2621 uTmpRsp.DWords.dw0 += cbItem;
2622 }
2623 else
2624 {
2625 GCPtrTop = uTmpRsp.Words.w0;
2626 uTmpRsp.Words.w0 += cbItem;
2627 }
2628 *puNewRsp = uTmpRsp.u;
2629 return GCPtrTop;
2630}
2631
2632
2633/**
2634 * Calculates the effective stack address for a push of the specified size as
2635 * well as the new temporary RSP value (upper bits may be masked).
2636 *
2637 * @returns Effective stack address for the push.
2638 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2639 * @param pTmpRsp The temporary stack pointer. This is updated.
2640 * @param   cbItem              The size of the stack item to push.
2641 */
2642DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
2643{
2644 RTGCPTR GCPtrTop;
2645
2646 if (IEM_IS_64BIT_CODE(pVCpu))
2647 GCPtrTop = pTmpRsp->u -= cbItem;
2648 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2649 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
2650 else
2651 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
2652 return GCPtrTop;
2653}
2654
2655
2656/**
2657 * Gets the effective stack address for a pop of the specified size and
2658 * calculates and updates the temporary RSP.
2659 *
2660 * @returns Current stack pointer.
2661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2662 * @param pTmpRsp The temporary stack pointer. This is updated.
2663 * @param cbItem The size of the stack item to pop.
2664 */
2665DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
2666{
2667 RTGCPTR GCPtrTop;
2668 if (IEM_IS_64BIT_CODE(pVCpu))
2669 {
2670 GCPtrTop = pTmpRsp->u;
2671 pTmpRsp->u += cbItem;
2672 }
2673 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2674 {
2675 GCPtrTop = pTmpRsp->DWords.dw0;
2676 pTmpRsp->DWords.dw0 += cbItem;
2677 }
2678 else
2679 {
2680 GCPtrTop = pTmpRsp->Words.w0;
2681 pTmpRsp->Words.w0 += cbItem;
2682 }
2683 return GCPtrTop;
2684}
2685
2686
2687/** Common body for iemRegRipNearReturnAndFinishClearingRF()
2688 * and iemRegRipNearReturnAndFinishNoFlags(). */
2689template<bool a_fWithFlags>
2690DECL_FORCE_INLINE(VBOXSTRICTRC)
2691iemRegRipNearReturnCommon(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t cbPop, IEMMODE enmEffOpSize) RT_NOEXCEPT
2692{
2693 /* Fetch the new RIP from the stack. */
2694 VBOXSTRICTRC rcStrict;
2695 RTUINT64U NewRip;
2696 RTUINT64U NewRsp;
2697 NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2698 switch (enmEffOpSize)
2699 {
2700 case IEMMODE_16BIT:
2701 NewRip.u = 0;
2702 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRip.Words.w0, &NewRsp);
2703 break;
2704 case IEMMODE_32BIT:
2705 NewRip.u = 0;
2706 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRip.DWords.dw0, &NewRsp);
2707 break;
2708 case IEMMODE_64BIT:
2709 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRip.u, &NewRsp);
2710 break;
2711 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2712 }
2713 if (rcStrict != VINF_SUCCESS)
2714 return rcStrict;
2715
2716    /* Check the new RIP before loading it. */
2717    /** @todo Should test this as the Intel+AMD pseudo code doesn't mention half
2718 * of it. The canonical test is performed here and for call. */
2719 if (enmEffOpSize != IEMMODE_64BIT)
2720 {
2721 if (RT_LIKELY(NewRip.DWords.dw0 <= pVCpu->cpum.GstCtx.cs.u32Limit))
2722 { /* likely */ }
2723 else
2724 {
2725 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pVCpu->cpum.GstCtx.cs.u32Limit));
2726 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2727 }
2728 }
2729 else
2730 {
2731 if (RT_LIKELY(IEM_IS_CANONICAL(NewRip.u)))
2732 { /* likely */ }
2733 else
2734 {
2735 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
2736 return iemRaiseNotCanonical(pVCpu);
2737 }
2738 }
2739
2740 /* Apply cbPop */
2741 if (cbPop)
2742 iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);
2743
2744 /* Commit it. */
2745 pVCpu->cpum.GstCtx.rip = NewRip.u;
2746 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2747
2748 /* Flush the prefetch buffer. */
2749#ifndef IEM_WITH_CODE_TLB
2750 iemOpcodeFlushLight(pVCpu, cbInstr);
2751#endif
2752 RT_NOREF(cbInstr);
2753
2754
2755 if (a_fWithFlags)
2756 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2757 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
2758}
2759
2760
2761/**
2762 * Implements retn and retn imm16.
2763 *
2764 * @param pVCpu The cross context virtual CPU structure of the
2765 * calling thread.
2766 * @param cbInstr The current instruction length.
2767 * @param enmEffOpSize The effective operand size. This is constant.
2768 * @param   cbPop               The number of argument bytes to pop from the
2769 *                              stack. This can be constant (zero).
2770 */
2771DECL_FORCE_INLINE(VBOXSTRICTRC)
2772iemRegRipNearReturnAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t cbPop, IEMMODE enmEffOpSize) RT_NOEXCEPT
2773{
2774 return iemRegRipNearReturnCommon<true /*a_fWithFlags*/>(pVCpu, cbInstr, cbPop, enmEffOpSize);
2775}
2776
2777
2778/**
2779 * Implements retn and retn imm16, no checking or clearing of
2780 * flags.
2781 *
2782 * @param pVCpu The cross context virtual CPU structure of the
2783 * calling thread.
2784 * @param cbInstr The current instruction length.
2785 * @param enmEffOpSize The effective operand size. This is constant.
2786 * @param   cbPop               The number of argument bytes to pop from the
2787 *                              stack. This can be constant (zero).
2788 */
2789DECL_FORCE_INLINE(VBOXSTRICTRC)
2790iemRegRipNearReturnAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t cbPop, IEMMODE enmEffOpSize) RT_NOEXCEPT
2791{
2792 return iemRegRipNearReturnCommon<false /*a_fWithFlags*/>(pVCpu, cbInstr, cbPop, enmEffOpSize);
2793}
2794
2795/** @} */
2796
2797
2798/** @name FPU access and helpers.
2799 *
2800 * @{
2801 */
2802
2803
2804/**
2805 * Hook for preparing to use the host FPU.
2806 *
2807 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2808 *
2809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2810 */
2811DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu) RT_NOEXCEPT
2812{
2813#ifdef IN_RING3
2814 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2815#else
2816 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
2817#endif
2818 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2819}
2820
2821
2822/**
2823 * Hook for preparing to use the host FPU for SSE.
2824 *
2825 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2826 *
2827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2828 */
2829DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu) RT_NOEXCEPT
2830{
2831 iemFpuPrepareUsage(pVCpu);
2832}
2833
2834
2835/**
2836 * Hook for preparing to use the host FPU for AVX.
2837 *
2838 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2839 *
2840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2841 */
2842DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu) RT_NOEXCEPT
2843{
2844 iemFpuPrepareUsage(pVCpu);
2845}
2846
2847
2848/**
2849 * Hook for actualizing the guest FPU state before the interpreter reads it.
2850 *
2851 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2852 *
2853 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2854 */
2855DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2856{
2857#ifdef IN_RING3
2858 NOREF(pVCpu);
2859#else
2860 CPUMRZFpuStateActualizeForRead(pVCpu);
2861#endif
2862 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2863}
2864
2865
2866/**
2867 * Hook for actualizing the guest FPU state before the interpreter changes it.
2868 *
2869 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2870 *
2871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2872 */
2873DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2874{
2875#ifdef IN_RING3
2876 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2877#else
2878 CPUMRZFpuStateActualizeForChange(pVCpu);
2879#endif
2880 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2881}
2882
2883
2884/**
2885 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
2886 * only.
2887 *
2888 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2889 *
2890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2891 */
2892DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2893{
2894#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
2895 NOREF(pVCpu);
2896#else
2897 CPUMRZFpuStateActualizeSseForRead(pVCpu);
2898#endif
2899 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2900}
2901
2902
2903/**
2904 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
2905 * read+write.
2906 *
2907 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2908 *
2909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2910 */
2911DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2912{
2913#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
2914 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2915#else
2916 CPUMRZFpuStateActualizeForChange(pVCpu);
2917#endif
2918 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2919
2920 /* Make sure any changes are loaded the next time around. */
2921 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_SSE;
2922}
2923
2924
2925/**
2926 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
2927 * only.
2928 *
2929 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2930 *
2931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2932 */
2933DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2934{
2935#ifdef IN_RING3
2936 NOREF(pVCpu);
2937#else
2938 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
2939#endif
2940 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2941}
2942
2943
2944/**
2945 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
2946 * read+write.
2947 *
2948 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2949 *
2950 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2951 */
2952DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2953{
2954#ifdef IN_RING3
2955 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2956#else
2957 CPUMRZFpuStateActualizeForChange(pVCpu);
2958#endif
2959 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2960
2961 /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
2962 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
2963}
2964
2965
2966/**
2967 * Stores a QNaN value into a FPU register.
2968 *
2969 * @param pReg Pointer to the register.
2970 */
2971DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg) RT_NOEXCEPT
2972{
2973 pReg->au32[0] = UINT32_C(0x00000000);
2974 pReg->au32[1] = UINT32_C(0xc0000000);
2975 pReg->au16[4] = UINT16_C(0xffff);
2976}
2977
2978
2979/**
2980 * Updates the FOP, FPU.CS and FPUIP registers, extended version.
2981 *
2982 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2983 * @param pFpuCtx The FPU context.
2984 * @param uFpuOpcode The FPU opcode value (see IEMCPU::uFpuOpcode).
2985 */
2986DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorkerEx(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint16_t uFpuOpcode) RT_NOEXCEPT
2987{
2988 Assert(uFpuOpcode != UINT16_MAX);
2989 pFpuCtx->FOP = uFpuOpcode;
2990    /** @todo x87.CS and FPUIP need to be kept separately. */
2991 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2992 {
2993        /** @todo Testcase: we make assumptions here about how FPUIP and FPUDP are
2994         *        handled in real mode, based on the fnsave and fnstenv images. */
2995 pFpuCtx->CS = 0;
2996 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
2997 }
2998 else if (!IEM_IS_LONG_MODE(pVCpu))
2999 {
3000 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
3001 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
3002 }
3003 else
3004 *(uint64_t *)&pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
3005}
3006
3007
3008/**
3009 * Marks the specified stack register as free (for FFREE).
3010 *
3011 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3012 * @param iStReg The register to free.
3013 */
3014DECLINLINE(void) iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
3015{
3016 Assert(iStReg < 8);
3017 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3018 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
3019 pFpuCtx->FTW &= ~RT_BIT(iReg);
3020}
3021
3022
3023/**
3024 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
3025 *
3026 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3027 */
3028DECLINLINE(void) iemFpuStackIncTop(PVMCPUCC pVCpu) RT_NOEXCEPT
3029{
3030 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3031 uint16_t uFsw = pFpuCtx->FSW;
3032 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
3033 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
3034 uFsw &= ~X86_FSW_TOP_MASK;
3035 uFsw |= uTop;
3036 pFpuCtx->FSW = uFsw;
3037}
3038
3039
3040/**
3041 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
3042 *
3043 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3044 */
3045DECLINLINE(void) iemFpuStackDecTop(PVMCPUCC pVCpu) RT_NOEXCEPT
3046{
3047 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3048 uint16_t uFsw = pFpuCtx->FSW;
3049 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
3050 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
3051 uFsw &= ~X86_FSW_TOP_MASK;
3052 uFsw |= uTop;
3053 pFpuCtx->FSW = uFsw;
3054}
3055
3056
3057
3058
3059DECLINLINE(int) iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
3060{
3061 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3062 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
3063 if (pFpuCtx->FTW & RT_BIT(iReg))
3064 return VINF_SUCCESS;
3065 return VERR_NOT_FOUND;
3066}
3067
3068
3069DECLINLINE(int) iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef) RT_NOEXCEPT
3070{
3071 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3072 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
3073 if (pFpuCtx->FTW & RT_BIT(iReg))
3074 {
3075 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
3076 return VINF_SUCCESS;
3077 }
3078 return VERR_NOT_FOUND;
3079}
3080
3081
3082DECLINLINE(int) iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
3083 uint8_t iStReg1, PCRTFLOAT80U *ppRef1) RT_NOEXCEPT
3084{
3085 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3086 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
3087 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
3088 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
3089 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
3090 {
3091 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
3092 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
3093 return VINF_SUCCESS;
3094 }
3095 return VERR_NOT_FOUND;
3096}
3097
3098
3099DECLINLINE(int) iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1) RT_NOEXCEPT
3100{
3101 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3102 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
3103 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
3104 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
3105 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
3106 {
3107 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
3108 return VINF_SUCCESS;
3109 }
3110 return VERR_NOT_FOUND;
3111}
3112
3113
3114/**
3115 * Rotates the stack registers when setting a new TOS.
3116 *
3117 * @param pFpuCtx The FPU context.
3118 * @param iNewTop New TOS value.
3119 * @remarks We only do this to speed up fxsave/fxrstor which
3120 * arrange the FP registers in stack order.
3121 * MUST be done before writing the new TOS (FSW).
3122 */
3123DECLINLINE(void) iemFpuRotateStackSetTop(PX86FXSTATE pFpuCtx, uint16_t iNewTop) RT_NOEXCEPT
3124{
3125 uint16_t iOldTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
3126 RTFLOAT80U ar80Temp[8];
3127
3128 if (iOldTop == iNewTop)
3129 return;
3130
3131 /* Unscrew the stack and get it into 'native' order. */
3132 ar80Temp[0] = pFpuCtx->aRegs[(8 - iOldTop + 0) & X86_FSW_TOP_SMASK].r80;
3133 ar80Temp[1] = pFpuCtx->aRegs[(8 - iOldTop + 1) & X86_FSW_TOP_SMASK].r80;
3134 ar80Temp[2] = pFpuCtx->aRegs[(8 - iOldTop + 2) & X86_FSW_TOP_SMASK].r80;
3135 ar80Temp[3] = pFpuCtx->aRegs[(8 - iOldTop + 3) & X86_FSW_TOP_SMASK].r80;
3136 ar80Temp[4] = pFpuCtx->aRegs[(8 - iOldTop + 4) & X86_FSW_TOP_SMASK].r80;
3137 ar80Temp[5] = pFpuCtx->aRegs[(8 - iOldTop + 5) & X86_FSW_TOP_SMASK].r80;
3138 ar80Temp[6] = pFpuCtx->aRegs[(8 - iOldTop + 6) & X86_FSW_TOP_SMASK].r80;
3139 ar80Temp[7] = pFpuCtx->aRegs[(8 - iOldTop + 7) & X86_FSW_TOP_SMASK].r80;
3140
3141 /* Now rotate the stack to the new position. */
3142 pFpuCtx->aRegs[0].r80 = ar80Temp[(iNewTop + 0) & X86_FSW_TOP_SMASK];
3143 pFpuCtx->aRegs[1].r80 = ar80Temp[(iNewTop + 1) & X86_FSW_TOP_SMASK];
3144 pFpuCtx->aRegs[2].r80 = ar80Temp[(iNewTop + 2) & X86_FSW_TOP_SMASK];
3145 pFpuCtx->aRegs[3].r80 = ar80Temp[(iNewTop + 3) & X86_FSW_TOP_SMASK];
3146 pFpuCtx->aRegs[4].r80 = ar80Temp[(iNewTop + 4) & X86_FSW_TOP_SMASK];
3147 pFpuCtx->aRegs[5].r80 = ar80Temp[(iNewTop + 5) & X86_FSW_TOP_SMASK];
3148 pFpuCtx->aRegs[6].r80 = ar80Temp[(iNewTop + 6) & X86_FSW_TOP_SMASK];
3149 pFpuCtx->aRegs[7].r80 = ar80Temp[(iNewTop + 7) & X86_FSW_TOP_SMASK];
3150}
3151
3152
3153/**
3154 * Updates the FPU exception status after FCW is changed.
3155 *
3156 * @param pFpuCtx The FPU context.
3157 */
3158DECLINLINE(void) iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
3159{
3160 uint16_t u16Fsw = pFpuCtx->FSW;
3161 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
3162 u16Fsw |= X86_FSW_ES | X86_FSW_B;
3163 else
3164 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
3165 pFpuCtx->FSW = u16Fsw;
3166}
3167
3168
3169/**
3170 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
3171 *
3172 * @returns The full FTW.
3173 * @param pFpuCtx The FPU context.
3174 */
3175DECLINLINE(uint16_t) iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx) RT_NOEXCEPT
3176{
3177 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
3178 uint16_t u16Ftw = 0;
3179 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
3180 for (unsigned iSt = 0; iSt < 8; iSt++)
3181 {
3182 unsigned const iReg = (iSt + iTop) & 7;
3183 if (!(u8Ftw & RT_BIT(iReg)))
3184 u16Ftw |= 3 << (iReg * 2); /* empty */
3185 else
3186 {
3187 uint16_t uTag;
3188 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
3189 if (pr80Reg->s.uExponent == 0x7fff)
3190 uTag = 2; /* Exponent is all 1's => Special. */
3191 else if (pr80Reg->s.uExponent == 0x0000)
3192 {
3193 if (pr80Reg->s.uMantissa == 0x0000)
3194 uTag = 1; /* All bits are zero => Zero. */
3195 else
3196 uTag = 2; /* Must be special. */
3197 }
3198 else if (pr80Reg->s.uMantissa & RT_BIT_64(63)) /* The J bit. */
3199 uTag = 0; /* Valid. */
3200 else
3201 uTag = 2; /* Must be special. */
3202
3203 u16Ftw |= uTag << (iReg * 2);
3204 }
3205 }
3206
3207 return u16Ftw;
3208}
3209
3210
3211/**
3212 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
3213 *
3214 * @returns The compressed FTW.
3215 * @param u16FullFtw The full FTW to convert.
3216 */
3217DECLINLINE(uint16_t) iemFpuCompressFtw(uint16_t u16FullFtw) RT_NOEXCEPT
3218{
3219 uint8_t u8Ftw = 0;
3220 for (unsigned i = 0; i < 8; i++)
3221 {
3222 if ((u16FullFtw & 3) != 3 /*empty*/)
3223 u8Ftw |= RT_BIT(i);
3224 u16FullFtw >>= 2;
3225 }
3226
3227 return u8Ftw;
3228}
3229
3230/** @} */
3231
3232
3233#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3234
3235/**
3236 * Gets CR0 fixed-0 bits in VMX operation.
3237 *
3238 * We do this rather than fetching what we report to the guest (in
3239 * IA32_VMX_CR0_FIXED0 MSR) because real hardware (and we do the same) reports
3240 * the same values regardless of whether the unrestricted-guest feature is available on the CPU.
3241 *
3242 * @returns CR0 fixed-0 bits.
3243 * @param pVCpu The cross context virtual CPU structure.
3244 * @param fVmxNonRootMode Whether the CR0 fixed-0 bits for VMX non-root mode
3245 * must be returned. When @c false, the CR0 fixed-0
3246 *                          bits for VMX root mode are returned.
3247 *
3248 */
3249DECLINLINE(uint64_t) iemVmxGetCr0Fixed0(PCVMCPUCC pVCpu, bool fVmxNonRootMode) RT_NOEXCEPT
3250{
3251 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
3252
3253 PCVMXMSRS pMsrs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs;
3254 if ( fVmxNonRootMode
3255 && (pMsrs->ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST))
3256 return VMX_V_CR0_FIXED0_UX;
3257 return VMX_V_CR0_FIXED0;
3258}
3259
3260
3261# ifdef XAPIC_OFF_END /* Requires VBox/apic.h to be included before IEMInline.h. */
3262/**
3263 * Sets virtual-APIC write emulation as pending.
3264 *
3265 * @param pVCpu The cross context virtual CPU structure.
3266 * @param offApic The offset in the virtual-APIC page that was written.
3267 */
3268DECLINLINE(void) iemVmxVirtApicSetPendingWrite(PVMCPUCC pVCpu, uint16_t offApic) RT_NOEXCEPT
3269{
3270 Assert(offApic < XAPIC_OFF_END + 4);
3271
3272 /*
3273 * Record the currently updated APIC offset, as we need this later for figuring
3274 * out whether to perform TPR, EOI or self-IPI virtualization, as well
3275 * as for supplying the exit qualification when causing an APIC-write VM-exit.
3276 */
3277 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offApic;
3278
3279 /*
3280 * Flag that we need to perform virtual-APIC write emulation (TPR/PPR/EOI/Self-IPI
3281 * virtualization or APIC-write emulation).
3282 */
3283 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3284 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
3285}
3286# endif /* XAPIC_OFF_END */
3287
3288#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3289
3290#endif /* !VMM_INCLUDED_SRC_VMMAll_target_x86_IEMInline_x86_h */