VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInline.h@101587

Last change on this file since 101587 was 101387, checked in by vboxsync, 16 months ago

VMM/IEM: Added a new class of threaded function variants, the 16f/32f/64f variants, which clear RF (and VBox internal friends) and check for TF (and VBox internal friends); the variants without the 'f' after the bit count skip this test+branch. The motivation is to deal with this at the threaded recompiler level rather than trying to optimize away the test+branch code when generating native code, making IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32 a very simple place to start emitting native code (compared to IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32_WITH_FLAGS). bugref:10371
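
As a rough illustration of the difference between the two variant families (a hedged sketch only: the function names and the exact flag handling below are simplified stand-ins for the generated threaded functions, not the actual VirtualBox code):

    /* Non-'f' variant: just advance the PC, no flag work at all. */
    static VBOXSTRICTRC iemThreadedAdvancePc32(PVMCPUCC pVCpu, uint8_t cbInstr)
    {
        pVCpu->cpum.GstCtx.eip += cbInstr;
        return VINF_SUCCESS;
    }

    /* 'f' variant: additionally clears RF and checks TF, raising a debug
       exception for single-stepping when TF was set. */
    static VBOXSTRICTRC iemThreadedAdvancePc32WithFlags(PVMCPUCC pVCpu, uint8_t cbInstr)
    {
        pVCpu->cpum.GstCtx.eip += cbInstr;
        if (RT_LIKELY(!(pVCpu->cpum.GstCtx.eflags.u & (X86_EFL_TF | X86_EFL_RF))))
            return VINF_SUCCESS;        /* the common, fast case */
        pVCpu->cpum.GstCtx.eflags.u &= ~X86_EFL_RF;
        if (pVCpu->cpum.GstCtx.eflags.u & X86_EFL_TF)
            return iemRaiseDebugException(pVCpu);
        return VINF_SUCCESS;
    }

The point of the plain variants is that the recompiler can pick them whenever the flag state is known not to need the check, so the native code emitted for the common case contains no test+branch at all.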

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 134.9 KB
/* $Id: IEMInline.h 101387 2023-10-07 23:34:54Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - Inlined Functions.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#ifndef VMM_INCLUDED_SRC_include_IEMInline_h
#define VMM_INCLUDED_SRC_include_IEMInline_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif


/**
 * Makes status code adjustments (pass up from I/O and access handlers)
 * as well as maintaining statistics.
 *
 * @returns Strict VBox status code to pass up.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   rcStrict    The status from executing an instruction.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
{
    if (rcStrict != VINF_SUCCESS)
    {
        /* Deal with the cases that should be treated as VINF_SUCCESS first. */
        if (   rcStrict == VINF_IEM_YIELD_PENDING_FF
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX /** @todo r=bird: Why do we need TWO status codes here? */
            || rcStrict == VINF_VMX_VMEXIT
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
            || rcStrict == VINF_SVM_VMEXIT
#endif
           )
        {
            if (pVCpu->iem.s.rcPassUp == VINF_SUCCESS)
                rcStrict = VINF_SUCCESS;
            else
            {
                pVCpu->iem.s.cRetPassUpStatus++;
                rcStrict = pVCpu->iem.s.rcPassUp;
            }
        }
        else if (RT_SUCCESS(rcStrict))
        {
            AssertMsg(   (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
                      || rcStrict == VINF_IOM_R3_IOPORT_READ
                      || rcStrict == VINF_IOM_R3_IOPORT_WRITE
                      || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
                      || rcStrict == VINF_IOM_R3_MMIO_READ
                      || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
                      || rcStrict == VINF_IOM_R3_MMIO_WRITE
                      || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
                      || rcStrict == VINF_CPUM_R3_MSR_READ
                      || rcStrict == VINF_CPUM_R3_MSR_WRITE
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR
                      || rcStrict == VINF_EM_RAW_TO_R3
                      || rcStrict == VINF_EM_TRIPLE_FAULT
                      || rcStrict == VINF_GIM_R3_HYPERCALL
                      /* raw-mode / virt handlers only: */
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
                      || rcStrict == VINF_SELM_SYNC_GDT
                      || rcStrict == VINF_CSAM_PENDING_ACTION
                      || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
                      /* nested hw.virt codes: */
                      || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
                      || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
                      , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
            int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
            if (rcPassUp == VINF_SUCCESS)
                pVCpu->iem.s.cRetInfStatuses++;
            else if (   rcPassUp < VINF_EM_FIRST
                     || rcPassUp > VINF_EM_LAST
                     || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
            {
                LogEx(LOG_GROUP_IEM, ("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
                pVCpu->iem.s.cRetPassUpStatus++;
                rcStrict = rcPassUp;
            }
            else
            {
                LogEx(LOG_GROUP_IEM, ("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
                pVCpu->iem.s.cRetInfStatuses++;
            }
        }
        else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
            pVCpu->iem.s.cRetAspectNotImplemented++;
        else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
            pVCpu->iem.s.cRetInstrNotImplemented++;
        else
            pVCpu->iem.s.cRetErrStatuses++;
    }
    else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
    {
        pVCpu->iem.s.cRetPassUpStatus++;
        rcStrict = pVCpu->iem.s.rcPassUp;
    }

    /* Just clear it here as well. */
    pVCpu->iem.s.rcPassUp = VINF_SUCCESS;

    return rcStrict;
}


/**
 * Sets the pass up status.
 *
 * @returns VINF_SUCCESS.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @param   rcPassUp    The pass up status.  Must be informational.
 *                      VINF_SUCCESS is not allowed.
 */
DECLINLINE(int) iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp) RT_NOEXCEPT
{
    AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);

    int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
    if (rcOldPassUp == VINF_SUCCESS)
        pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
    /* If both are EM scheduling codes, use EM priority rules. */
    else if (   rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
             && rcPassUp    >= VINF_EM_FIRST && rcPassUp    <= VINF_EM_LAST)
    {
        if (rcPassUp < rcOldPassUp)
        {
            LogEx(LOG_GROUP_IEM, ("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
            pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
        }
        else
            LogEx(LOG_GROUP_IEM, ("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
    }
    /* Override EM scheduling with specific status code. */
    else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
    {
        LogEx(LOG_GROUP_IEM, ("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
        pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
    }
    /* Don't override specific status code, first come first served. */
    else
        LogEx(LOG_GROUP_IEM, ("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
    return VINF_SUCCESS;
}

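/* Example (informal): how the rules above combine.  EM scheduling codes are
 * ordered so that a lower value means higher priority, hence the
 * 'rcPassUp < rcOldPassUp' test.  With illustrative values:
 *      iemSetPassUpStatus(pVCpu, VINF_EM_RAW_TO_R3);      // stored, old was VINF_SUCCESS
 *      iemSetPassUpStatus(pVCpu, VINF_EM_DBG_STEPPED);    // kept only if its value is lower
 *      iemSetPassUpStatus(pVCpu, VINF_IOM_R3_MMIO_READ);  // a non-EM code overrides a stored EM code
 * The relative ordering of the VINF_EM_XXX codes is defined in VBox/err.h. */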

/**
 * Calculates the IEM_F_MODE_X86_32BIT_FLAT flag.
 *
 * Checks if CS, SS, DS and ES are all wide open flat 32-bit segments.  This
 * will reject expand down data segments and conforming code segments.
 *
 * ASSUMES that the CPU is in 32-bit mode.
 *
 * @note    Will return zero if any of the segment register state is marked
 *          external; this must be factored into assertions checking fExec
 *          consistency.
 *
 * @returns IEM_F_MODE_X86_32BIT_FLAT or zero.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @sa      iemCalc32BitFlatIndicatorEsDs
 */
DECL_FORCE_INLINE(uint32_t) iemCalc32BitFlatIndicator(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    AssertCompile(X86_SEL_TYPE_DOWN == X86_SEL_TYPE_CONF);
    return (  (  pVCpu->cpum.GstCtx.es.Attr.u
               | pVCpu->cpum.GstCtx.cs.Attr.u
               | pVCpu->cpum.GstCtx.ss.Attr.u
               | pVCpu->cpum.GstCtx.ds.Attr.u)
            & (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86DESCATTR_UNUSABLE))
          ==   (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P)
        && (  (pVCpu->cpum.GstCtx.es.u32Limit + 1)
            | (pVCpu->cpum.GstCtx.cs.u32Limit + 1)
            | (pVCpu->cpum.GstCtx.ss.u32Limit + 1)
            | (pVCpu->cpum.GstCtx.ds.u32Limit + 1))
          == 0
        && (  pVCpu->cpum.GstCtx.es.u64Base
            | pVCpu->cpum.GstCtx.cs.u64Base
            | pVCpu->cpum.GstCtx.ss.u64Base
            | pVCpu->cpum.GstCtx.ds.u64Base)
          == 0
        && !(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS))
        ? IEM_F_MODE_X86_32BIT_FLAT : 0;
}

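/* Worked example for the check above: a fully flat 32-bit segment has
 *      u64Base  = 0
 *      u32Limit = UINT32_MAX, so u32Limit + 1 wraps to 0 -- which is why the
 *                 incremented limits can simply be OR'ed together and
 *                 compared against zero
 *      Attr     = present, accessed, granular (G), 32-bit (D), not
 *                 expand-down/conforming, not unusable
 * OR'ing the four segment registers' fields together lets each property be
 * validated for all of them with a single compare. */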

/**
 * Calculates the IEM_F_MODE_X86_32BIT_FLAT flag, ASSUMING the CS and SS are
 * flat already.
 *
 * This is used by sysenter.
 *
 * @note    Will return zero if any of the segment register state is marked
 *          external; this must be factored into assertions checking fExec
 *          consistency.
 *
 * @returns IEM_F_MODE_X86_32BIT_FLAT or zero.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @sa      iemCalc32BitFlatIndicator
 */
DECL_FORCE_INLINE(uint32_t) iemCalc32BitFlatIndicatorEsDs(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    AssertCompile(X86_SEL_TYPE_DOWN == X86_SEL_TYPE_CONF);
    return (  (  pVCpu->cpum.GstCtx.es.Attr.u
               | pVCpu->cpum.GstCtx.ds.Attr.u)
            & (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86DESCATTR_UNUSABLE))
          ==   (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P)
        && (  (pVCpu->cpum.GstCtx.es.u32Limit + 1)
            | (pVCpu->cpum.GstCtx.ds.u32Limit + 1))
          == 0
        && (  pVCpu->cpum.GstCtx.es.u64Base
            | pVCpu->cpum.GstCtx.ds.u64Base)
          == 0
        && !(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS))
        ? IEM_F_MODE_X86_32BIT_FLAT : 0;
}


/**
 * Calculates the IEM_F_MODE_XXX and CPL flags.
 *
 * @returns IEM_F_MODE_XXX
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 */
DECL_FORCE_INLINE(uint32_t) iemCalcExecModeAndCplFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    /*
     * We duplicate code from CPUMGetGuestCPL and CPUMIsGuestIn64BitCodeEx
     * here to try to get this done as efficiently as possible.
     */
    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);

    if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
    {
        if (!pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
        {
            Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
            uint32_t fExec = ((uint32_t)pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl << IEM_F_X86_CPL_SHIFT);
            if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig)
            {
                Assert(!pVCpu->cpum.GstCtx.cs.Attr.n.u1Long || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA));
                fExec |= IEM_F_MODE_X86_32BIT_PROT | iemCalc32BitFlatIndicator(pVCpu);
            }
            else if (   pVCpu->cpum.GstCtx.cs.Attr.n.u1Long
                     && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA))
                fExec |= IEM_F_MODE_X86_64BIT;
            else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
                fExec |= IEM_F_MODE_X86_16BIT_PROT;
            else
                fExec |= IEM_F_MODE_X86_16BIT_PROT_PRE_386;
            return fExec;
        }
        return IEM_F_MODE_X86_16BIT_PROT_V86 | (UINT32_C(3) << IEM_F_X86_CPL_SHIFT);
    }

    /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
    if (RT_LIKELY(!pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig))
    {
        if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
            return IEM_F_MODE_X86_16BIT;
        return IEM_F_MODE_X86_16BIT_PRE_386;
    }

    /* 32-bit unreal mode. */
    return IEM_F_MODE_X86_32BIT | iemCalc32BitFlatIndicator(pVCpu);
}

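/* Summary of the mode selection above (informal):
 *
 *      CR0.PE  EFL.VM  CS.L && EFER.LMA  CS.D    ->  IEM_F_MODE_X86_...
 *        0       -            -            0         16BIT / 16BIT_PRE_386
 *        0       -            -            1         32BIT (unreal mode)
 *        1       1            -            -         16BIT_PROT_V86 (CPL=3)
 *        1       0            1            -         64BIT
 *        1       0            0            1         32BIT_PROT (+ flat indicator)
 *        1       0            0            0         16BIT_PROT / 16BIT_PROT_PRE_386
 */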

/**
 * Calculates the AMD-V and VT-x related context flags.
 *
 * @returns 0 or a combination of IEM_F_X86_CTX_IN_GUEST, IEM_F_X86_CTX_SVM and
 *          IEM_F_X86_CTX_VMX.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 */
DECL_FORCE_INLINE(uint32_t) iemCalcExecHwVirtFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    /*
     * This duplicates code from CPUMIsGuestVmxEnabled, CPUMIsGuestSvmEnabled
     * and CPUMIsGuestInNestedHwvirtMode to some extent.
     */
    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);

    AssertCompile(X86_CR4_VMXE != MSR_K6_EFER_SVME);
    uint64_t const fTmp = (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VMXE)
                        | (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SVME);
    if (RT_LIKELY(!fTmp))
        return 0; /* likely */

    if (fTmp & X86_CR4_VMXE)
    {
        Assert(pVCpu->cpum.GstCtx.hwvirt.enmHwvirt == CPUMHWVIRT_VMX);
        if (pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode)
            return IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST;
        return IEM_F_X86_CTX_VMX;
    }

    Assert(pVCpu->cpum.GstCtx.hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
    if (pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN)
        return IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST;
    return IEM_F_X86_CTX_SVM;
}

#ifdef VBOX_INCLUDED_vmm_dbgf_h /* VM::dbgf.ro.cEnabledHwBreakpoints is only accessible if VBox/vmm/dbgf.h is included. */

/**
 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags.
 *
 * @returns IEM_F_BRK_PENDING_XXX or zero.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 */
DECL_FORCE_INLINE(uint32_t) iemCalcExecDbgFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);

    if (RT_LIKELY(   !(pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)
                  && pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledHwBreakpoints == 0))
        return 0;
    return iemCalcExecDbgFlagsSlow(pVCpu);
}

/**
 * Calculates the IEM_F_XXX flags.
 *
 * @returns IEM_F_XXX combination matching the current CPU state.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 */
DECL_FORCE_INLINE(uint32_t) iemCalcExecFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    return iemCalcExecModeAndCplFlags(pVCpu)
         | iemCalcExecHwVirtFlags(pVCpu)
         /* SMM is not yet implemented */
         | iemCalcExecDbgFlags(pVCpu)
         ;
}


/**
 * Re-calculates the MODE and CPL parts of IEMCPU::fExec.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 */
DECL_FORCE_INLINE(void) iemRecalcExecModeAndCplFlags(PVMCPUCC pVCpu)
{
    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
                       | iemCalcExecModeAndCplFlags(pVCpu);
}


/**
 * Re-calculates the IEM_F_PENDING_BRK_MASK part of IEMCPU::fExec.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 */
DECL_FORCE_INLINE(void) iemRecalcExecDbgFlags(PVMCPUCC pVCpu)
{
    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_PENDING_BRK_MASK)
                       | iemCalcExecDbgFlags(pVCpu);
}

#endif /* VBOX_INCLUDED_vmm_dbgf_h */


#ifndef IEM_WITH_OPAQUE_DECODER_STATE

# if defined(VBOX_INCLUDED_vmm_dbgf_h) || defined(DOXYGEN_RUNNING) /* dbgf.ro.cEnabledHwBreakpoints */

/**
 * Initializes the execution state.
 *
 * @param   pVCpu           The cross context virtual CPU structure of the
 *                          calling thread.
 * @param   fExecOpts       Optional execution flags:
 *                              - IEM_F_BYPASS_HANDLERS
 *                              - IEM_F_X86_DISREGARD_LOCK
 *
 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
 *          side-effects in strict builds.
 */
DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
{
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));

    pVCpu->iem.s.rcPassUp        = VINF_SUCCESS;
    pVCpu->iem.s.fExec           = iemCalcExecFlags(pVCpu) | fExecOpts;
    pVCpu->iem.s.cActiveMappings = 0;
    pVCpu->iem.s.iNextMapping    = 0;

# ifdef VBOX_STRICT
    pVCpu->iem.s.enmDefAddrMode   = (IEMMODE)0xfe;
    pVCpu->iem.s.enmEffAddrMode   = (IEMMODE)0xfe;
    pVCpu->iem.s.enmDefOpSize     = (IEMMODE)0xfe;
    pVCpu->iem.s.enmEffOpSize     = (IEMMODE)0xfe;
    pVCpu->iem.s.fPrefixes        = 0xfeedbeef;
    pVCpu->iem.s.uRexReg          = 127;
    pVCpu->iem.s.uRexB            = 127;
    pVCpu->iem.s.offModRm         = 127;
    pVCpu->iem.s.uRexIndex        = 127;
    pVCpu->iem.s.iEffSeg          = 127;
    pVCpu->iem.s.idxPrefix        = 127;
    pVCpu->iem.s.uVex3rdReg       = 127;
    pVCpu->iem.s.uVexLength       = 127;
    pVCpu->iem.s.fEvexStuff       = 127;
    pVCpu->iem.s.uFpuOpcode       = UINT16_MAX;
# ifdef IEM_WITH_CODE_TLB
    pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
    pVCpu->iem.s.pbInstrBuf       = NULL;
    pVCpu->iem.s.cbInstrBuf       = UINT16_MAX;
    pVCpu->iem.s.cbInstrBufTotal  = UINT16_MAX;
    pVCpu->iem.s.offCurInstrStart = INT16_MAX;
    pVCpu->iem.s.uInstrBufPc      = UINT64_C(0xc0ffc0ffcff0c0ff);
# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    pVCpu->iem.s.offOpcode        = 127;
# endif
# else
    pVCpu->iem.s.offOpcode        = 127;
    pVCpu->iem.s.cbOpcode         = 127;
# endif
# endif /* VBOX_STRICT */
}


# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
/**
 * Performs a minimal reinitialization of the execution state.
 *
 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
 * 'world-switch' type operations on the CPU.  Currently only nested
 * hardware-virtualization uses it.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr     The instruction length (for flushing).
 */
DECLINLINE(void) iemReInitExec(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
{
    pVCpu->iem.s.fExec = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
    iemOpcodeFlushHeavy(pVCpu, cbInstr);
}
# endif

# endif /* VBOX_INCLUDED_vmm_dbgf_h || DOXYGEN_RUNNING */

/**
 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 */
DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
# ifdef VBOX_STRICT
# ifdef IEM_WITH_CODE_TLB
    NOREF(pVCpu);
# else
    pVCpu->iem.s.cbOpcode = 0;
# endif
# else
    NOREF(pVCpu);
# endif
}


/**
 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
 *
 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
 *
 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   rcStrict    The status code to fiddle.
 */
DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
{
    iemUninitExec(pVCpu);
    return iemExecStatusCodeFiddling(pVCpu, rcStrict);
}

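/* The iemInitExec/iemUninitExec pair brackets the IEMExec* entry points,
 * roughly like this (hypothetical sketch of a caller, not actual code):
 *
 *      iemInitExec(pVCpu, 0);  // fExecOpts
 *      VBOXSTRICTRC rcStrict = <decode and execute one instruction>;
 *      return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
 */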

/**
 * Macro used by the IEMExec* methods to check the given instruction length.
 *
 * Will return on failure!
 *
 * @param   a_cbInstr   The given instruction length.
 * @param   a_cbMin     The minimum length.
 */
# define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
    AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
                    ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)

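/* The unsigned subtraction above folds the two-sided range check
 *      a_cbMin <= a_cbInstr && a_cbInstr <= 15
 * into a single compare: when a_cbInstr < a_cbMin the subtraction wraps
 * around to a huge value and the test fails.  E.g. with a_cbMin=1,
 * a_cbInstr=3 gives 2 <= 14 (ok) while a_cbInstr=0 gives 0xffffffff <= 14
 * (fails). */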

# ifndef IEM_WITH_SETJMP

/**
 * Fetches the first opcode byte.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   pu8     Where to return the opcode byte.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetFirstU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
{
    /*
     * Check for hardware instruction breakpoints.
     */
    if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_INSTR)))
    { /* likely */ }
    else
    {
        VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
                                                       pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        { /* likely */ }
        else
        {
            *pu8 = 0xff; /* shut up gcc. sigh */
            if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
                return iemRaiseDebugException(pVCpu);
            return rcStrict;
        }
    }

    /*
     * Fetch the first opcode byte.
     */
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
        *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
        return VINF_SUCCESS;
    }
    return iemOpcodeGetNextU8Slow(pVCpu, pu8);
}

# else /* IEM_WITH_SETJMP */

/**
 * Fetches the first opcode byte, longjmp on error.
 *
 * @returns The opcode byte.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
DECL_INLINE_THROW(uint8_t) iemOpcodeGetFirstU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
    /*
     * Check for hardware instruction breakpoints.
     */
    if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_INSTR)))
    { /* likely */ }
    else
    {
        VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
                                                       pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        { /* likely */ }
        else
        {
            if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
                rcStrict = iemRaiseDebugException(pVCpu);
            IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
        }
    }

    /*
     * Fetch the first opcode byte.
     */
# ifdef IEM_WITH_CODE_TLB
    uint8_t         bRet;
    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    if (RT_LIKELY(   pbBuf != NULL
                  && offBuf < pVCpu->iem.s.cbInstrBuf))
    {
        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
        bRet = pbBuf[offBuf];
    }
    else
        bRet = iemOpcodeGetNextU8SlowJmp(pVCpu);
# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    Assert(pVCpu->iem.s.offOpcode == 0);
    pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet;
# endif
    return bRet;

# else /* !IEM_WITH_CODE_TLB */
    uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
        return pVCpu->iem.s.abOpcode[offOpcode];
    }
    return iemOpcodeGetNextU8SlowJmp(pVCpu);
# endif
}

# endif /* IEM_WITH_SETJMP */

/**
 * Fetches the first opcode byte, returns/throws automatically on failure.
 *
 * @param   a_pu8       Where to return the opcode byte.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
# define IEM_OPCODE_GET_FIRST_U8(a_pu8) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetFirstU8(pVCpu, (a_pu8)); \
        if (rcStrict2 == VINF_SUCCESS) \
        { /* likely */ } \
        else \
            return rcStrict2; \
    } while (0)
# else
# define IEM_OPCODE_GET_FIRST_U8(a_pu8) (*(a_pu8) = iemOpcodeGetFirstU8Jmp(pVCpu))
# endif /* IEM_WITH_SETJMP */

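/* Typical use in an instruction decoder (hypothetical sketch; the real
 * decoder functions live in the IEMAllInst* files):
 *
 *      uint8_t bOpcode;
 *      IEM_OPCODE_GET_FIRST_U8(&bOpcode);  // returns/longjmps on failure
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_U8(&bRm);       // ModR/M byte, same error model
 *
 * In the status code build the macros expand to a fetch plus an early
 * return, in the setjmp build to a plain assignment that may longjmp. */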


# ifndef IEM_WITH_SETJMP

/**
 * Fetches the next opcode byte.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   pu8     Where to return the opcode byte.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
{
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
        *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
        return VINF_SUCCESS;
    }
    return iemOpcodeGetNextU8Slow(pVCpu, pu8);
}

# else /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode byte, longjmp on error.
 *
 * @returns The opcode byte.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
DECL_INLINE_THROW(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
# ifdef IEM_WITH_CODE_TLB
    uint8_t         bRet;
    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    if (RT_LIKELY(   pbBuf != NULL
                  && offBuf < pVCpu->iem.s.cbInstrBuf))
    {
        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
        bRet = pbBuf[offBuf];
    }
    else
        bRet = iemOpcodeGetNextU8SlowJmp(pVCpu);
# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    Assert(pVCpu->iem.s.offOpcode < sizeof(pVCpu->iem.s.abOpcode));
    pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet;
# endif
    return bRet;

# else /* !IEM_WITH_CODE_TLB */
    uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
        return pVCpu->iem.s.abOpcode[offOpcode];
    }
    return iemOpcodeGetNextU8SlowJmp(pVCpu);
# endif
}

# endif /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode byte, returns automatically on failure.
 *
 * @param   a_pu8       Where to return the opcode byte.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
        if (rcStrict2 == VINF_SUCCESS) \
        { /* likely */ } \
        else \
            return rcStrict2; \
    } while (0)
# else
# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
# endif /* IEM_WITH_SETJMP */


# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next signed byte from the opcode stream.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pi8     Where to return the signed byte.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8) RT_NOEXCEPT
{
    return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
}
# endif /* !IEM_WITH_SETJMP */


/**
 * Fetches the next signed byte from the opcode stream, returning automatically
 * on failure.
 *
 * @param   a_pi8       Where to return the signed byte.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else /* IEM_WITH_SETJMP */
# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))

# endif /* IEM_WITH_SETJMP */


# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next signed byte from the opcode stream, extending it to
 * unsigned 16-bit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu16    Where to return the unsigned word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);

    *pu16 = (uint16_t)(int16_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
    pVCpu->iem.s.offOpcode = offOpcode + 1;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next signed byte from the opcode stream and sign-extends it to
 * a word, returning automatically on failure.
 *
 * @param   a_pu16      Where to return the word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (uint16_t)(int16_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
# endif

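/* The cast chain (uint16_t)(int16_t)(int8_t) above performs the sign
 * extension: e.g. opcode byte 0x80 -> (int8_t)-128 -> (int16_t)0xff80 ->
 * (uint16_t)0xff80, while 0x7f stays 0x007f.  The same pattern recurs below
 * for the 32-bit and 64-bit widenings. */
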
# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next signed byte from the opcode stream, extending it to
 * unsigned 32-bit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu32    Where to return the unsigned dword.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);

    *pu32 = (uint32_t)(int32_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
    pVCpu->iem.s.offOpcode = offOpcode + 1;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next signed byte from the opcode stream and sign-extends it to
 * a double word, returning automatically on failure.
 *
 * @param   a_pu32      Where to return the double word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (uint32_t)(int32_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
# endif


# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next signed byte from the opcode stream, extending it to
 * unsigned 64-bit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu64    Where to return the unsigned qword.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);

    *pu64 = (uint64_t)(int64_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
    pVCpu->iem.s.offOpcode = offOpcode + 1;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next signed byte from the opcode stream and sign-extends it to
 * a quad word, returning automatically on failure.
 *
 * @param   a_pu64      Where to return the quad word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
# endif


# ifndef IEM_WITH_SETJMP

/**
 * Fetches the next opcode word.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu16    Where to return the opcode word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
{
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
# else
        *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
# endif
        return VINF_SUCCESS;
    }
    return iemOpcodeGetNextU16Slow(pVCpu, pu16);
}

# else /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode word, longjmp on error.
 *
 * @returns The opcode word.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
DECL_INLINE_THROW(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
# ifdef IEM_WITH_CODE_TLB
    uint16_t        u16Ret;
    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    if (RT_LIKELY(   pbBuf != NULL
                  && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
    {
        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        u16Ret = *(uint16_t const *)&pbBuf[offBuf];
# else
        u16Ret = RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
# endif
    }
    else
        u16Ret = iemOpcodeGetNextU16SlowJmp(pVCpu);

# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    Assert(offOpcode + 1 < sizeof(pVCpu->iem.s.abOpcode));
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    *(uint16_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u16Ret;
# else
    pVCpu->iem.s.abOpcode[offOpcode]     = RT_LO_U8(u16Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_HI_U8(u16Ret);
# endif
    pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)2;
# endif

    return u16Ret;

# else /* !IEM_WITH_CODE_TLB */
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
# else
        return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
# endif
    }
    return iemOpcodeGetNextU16SlowJmp(pVCpu);
# endif /* !IEM_WITH_CODE_TLB */
}

# endif /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode word, returns automatically on failure.
 *
 * @param   a_pu16      Where to return the opcode word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
# endif

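/* RT_MAKE_U16 assembles the two opcode bytes in little-endian order, e.g.
 * the byte sequence 0x34 0x12 yields 0x1234.  The IEM_USE_UNALIGNED_DATA_ACCESS
 * path reads the same value directly on targets where unaligned loads are
 * cheap and permitted; both paths produce identical results. */
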
# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next opcode word, zero extending it to a double word.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu32    Where to return the opcode double word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);

    *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    pVCpu->iem.s.offOpcode = offOpcode + 2;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next opcode word and zero extends it to a double word, returns
 * automatically on failure.
 *
 * @param   a_pu32      Where to return the opcode double word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
# endif

# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next opcode word, zero extending it to a quad word.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu64    Where to return the opcode quad word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);

    *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    pVCpu->iem.s.offOpcode = offOpcode + 2;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next opcode word and zero extends it to a quad word, returns
 * automatically on failure.
 *
 * @param   a_pu64      Where to return the opcode quad word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
# endif


# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next signed word from the opcode stream.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pi16    Where to return the signed word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16) RT_NOEXCEPT
{
    return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
}
# endif /* !IEM_WITH_SETJMP */


/**
 * Fetches the next signed word from the opcode stream, returning automatically
 * on failure.
 *
 * @param   a_pi16      Where to return the signed word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
# endif

# ifndef IEM_WITH_SETJMP

/**
 * Fetches the next opcode dword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu32    Where to return the opcode double word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
{
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
# else
        *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                    pVCpu->iem.s.abOpcode[offOpcode + 1],
                                    pVCpu->iem.s.abOpcode[offOpcode + 2],
                                    pVCpu->iem.s.abOpcode[offOpcode + 3]);
# endif
        return VINF_SUCCESS;
    }
    return iemOpcodeGetNextU32Slow(pVCpu, pu32);
}

# else /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode dword, longjmp on error.
 *
 * @returns The opcode dword.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
DECL_INLINE_THROW(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
# ifdef IEM_WITH_CODE_TLB
    uint32_t        u32Ret;
    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    if (RT_LIKELY(   pbBuf != NULL
                  && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
    {
        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        u32Ret = *(uint32_t const *)&pbBuf[offBuf];
# else
        u32Ret = RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
                                     pbBuf[offBuf + 1],
                                     pbBuf[offBuf + 2],
                                     pbBuf[offBuf + 3]);
# endif
    }
    else
        u32Ret = iemOpcodeGetNextU32SlowJmp(pVCpu);

# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    Assert(offOpcode + 3 < sizeof(pVCpu->iem.s.abOpcode));
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    *(uint32_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u32Ret;
# else
    pVCpu->iem.s.abOpcode[offOpcode]     = RT_BYTE1(u32Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_BYTE2(u32Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 2] = RT_BYTE3(u32Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 3] = RT_BYTE4(u32Ret);
# endif
    pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)4;
# endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */

    return u32Ret;

# else /* !IEM_WITH_CODE_TLB */
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
# else
        return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                   pVCpu->iem.s.abOpcode[offOpcode + 1],
                                   pVCpu->iem.s.abOpcode[offOpcode + 2],
                                   pVCpu->iem.s.abOpcode[offOpcode + 3]);
# endif
    }
    return iemOpcodeGetNextU32SlowJmp(pVCpu);
# endif
}

# endif /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode dword, returns automatically on failure.
 *
 * @param   a_pu32      Where to return the opcode dword.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
# endif

# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next opcode dword, zero extending it to a quad word.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu64    Where to return the opcode quad word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);

    *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                pVCpu->iem.s.abOpcode[offOpcode + 1],
                                pVCpu->iem.s.abOpcode[offOpcode + 2],
                                pVCpu->iem.s.abOpcode[offOpcode + 3]);
    pVCpu->iem.s.offOpcode = offOpcode + 4;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next opcode dword and zero extends it to a quad word, returns
 * automatically on failure.
 *
 * @param   a_pu64      Where to return the opcode quad word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
# endif


# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next signed double word from the opcode stream.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pi32    Where to return the signed double word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32) RT_NOEXCEPT
{
    return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
}
# endif

/**
 * Fetches the next signed double word from the opcode stream, returning
 * automatically on failure.
 *
 * @param   a_pi32      Where to return the signed double word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
# endif

# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next opcode dword, sign extending it into a quad word.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu64    Where to return the opcode quad word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);

    int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                      pVCpu->iem.s.abOpcode[offOpcode + 1],
                                      pVCpu->iem.s.abOpcode[offOpcode + 2],
                                      pVCpu->iem.s.abOpcode[offOpcode + 3]);
    *pu64 = (uint64_t)(int64_t)i32;
    pVCpu->iem.s.offOpcode = offOpcode + 4;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next opcode double word and sign extends it to a quad word,
 * returns automatically on failure.
 *
 * @param   a_pu64      Where to return the opcode quad word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
# endif

# ifndef IEM_WITH_SETJMP

/**
 * Fetches the next opcode qword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu64    Where to return the opcode qword.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
{
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
    {
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
# else
        *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                    pVCpu->iem.s.abOpcode[offOpcode + 1],
                                    pVCpu->iem.s.abOpcode[offOpcode + 2],
                                    pVCpu->iem.s.abOpcode[offOpcode + 3],
                                    pVCpu->iem.s.abOpcode[offOpcode + 4],
                                    pVCpu->iem.s.abOpcode[offOpcode + 5],
                                    pVCpu->iem.s.abOpcode[offOpcode + 6],
                                    pVCpu->iem.s.abOpcode[offOpcode + 7]);
# endif
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
        return VINF_SUCCESS;
    }
    return iemOpcodeGetNextU64Slow(pVCpu, pu64);
}

# else /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode qword, longjmp on error.
 *
 * @returns The opcode qword.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
DECL_INLINE_THROW(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
# ifdef IEM_WITH_CODE_TLB
    uint64_t        u64Ret;
    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    if (RT_LIKELY(   pbBuf != NULL
                  && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
    {
        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        u64Ret = *(uint64_t const *)&pbBuf[offBuf];
# else
        u64Ret = RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
                                     pbBuf[offBuf + 1],
                                     pbBuf[offBuf + 2],
                                     pbBuf[offBuf + 3],
                                     pbBuf[offBuf + 4],
                                     pbBuf[offBuf + 5],
                                     pbBuf[offBuf + 6],
                                     pbBuf[offBuf + 7]);
# endif
    }
    else
        u64Ret = iemOpcodeGetNextU64SlowJmp(pVCpu);

# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    Assert(offOpcode + 7 < sizeof(pVCpu->iem.s.abOpcode));
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    *(uint64_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u64Ret;
# else
    pVCpu->iem.s.abOpcode[offOpcode]     = RT_BYTE1(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_BYTE2(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 2] = RT_BYTE3(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 3] = RT_BYTE4(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 4] = RT_BYTE5(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 5] = RT_BYTE6(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 6] = RT_BYTE7(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 7] = RT_BYTE8(u64Ret);
# endif
    pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)8;
# endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */

    return u64Ret;

# else /* !IEM_WITH_CODE_TLB */
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
# else
        return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                   pVCpu->iem.s.abOpcode[offOpcode + 1],
                                   pVCpu->iem.s.abOpcode[offOpcode + 2],
                                   pVCpu->iem.s.abOpcode[offOpcode + 3],
                                   pVCpu->iem.s.abOpcode[offOpcode + 4],
                                   pVCpu->iem.s.abOpcode[offOpcode + 5],
                                   pVCpu->iem.s.abOpcode[offOpcode + 6],
                                   pVCpu->iem.s.abOpcode[offOpcode + 7]);
# endif
    }
    return iemOpcodeGetNextU64SlowJmp(pVCpu);
# endif /* !IEM_WITH_CODE_TLB */
}

# endif /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode quad word, returns automatically on failure.
 *
 * @param   a_pu64      Where to return the opcode quad word.
 * @remark Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
# endif

/**
 * For fetching the opcode bytes for a ModR/M effective address, but throw
 * away the result.
 *
 * This is used when decoding undefined opcodes and such where we want to avoid
 * unnecessary MC blocks.
 *
 * @note The recompiler code overrides this one so iemOpHlpCalcRmEffAddrJmpEx is
 *       used instead.  At least for now...
 */
# ifndef IEM_WITH_SETJMP
# define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
        RTGCPTR      GCPtrEff; \
        VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, (a_bRm), 0, &GCPtrEff); \
        if (rcStrict != VINF_SUCCESS) \
            return rcStrict; \
    } while (0)
# else
# define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
        (void)iemOpHlpCalcRmEffAddrJmp(pVCpu, (a_bRm), 0); \
    } while (0)
# endif

#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */


/** @name  Misc Worker Functions.
 * @{
 */

/**
 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
 * not (kind of obsolete now).
 *
 * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
 */
#define IEMMISC_GET_EFL(a_pVCpu)            ( (a_pVCpu)->cpum.GstCtx.eflags.u )

/**
 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
 *
 * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
 * @param   a_fEfl  The new EFLAGS.
 */
#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl)    do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)


/**
 * Loads a NULL data selector into a selector register, both the hidden and
 * visible parts, in protected mode.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pSReg   Pointer to the segment register.
 * @param   uRpl    The RPL.
 */
DECLINLINE(void) iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl) RT_NOEXCEPT
{
    /** @todo Testcase: write a testcase checking what happens when loading a NULL
     *        data selector in protected mode. */
    pSReg->Sel      = uRpl;
    pSReg->ValidSel = uRpl;
    pSReg->fFlags   = CPUMSELREG_FLAGS_VALID;
    if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
    {
        /* VT-x (Intel 3960x) observed doing something like this. */
        pSReg->Attr.u   = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (IEM_GET_CPL(pVCpu) << X86DESCATTR_DPL_SHIFT);
        pSReg->u32Limit = UINT32_MAX;
        pSReg->u64Base  = 0;
    }
    else
    {
        pSReg->Attr.u   = X86DESCATTR_UNUSABLE;
        pSReg->u32Limit = 0;
        pSReg->u64Base  = 0;
    }
}

/** @} */


/*
 *
 * Helper routines.
 * Helper routines.
 * Helper routines.
 *
 */

#ifndef IEM_WITH_OPAQUE_DECODER_STATE

/**
 * Recalculates the effective operand size.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
DECLINLINE(void) iemRecalEffOpSize(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    switch (IEM_GET_CPU_MODE(pVCpu))
    {
        case IEMMODE_16BIT:
            pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
            break;
        case IEMMODE_32BIT:
            pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
            break;
        case IEMMODE_64BIT:
            switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
            {
                case 0:
                    pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
                    break;
                case IEM_OP_PRF_SIZE_OP:
                    pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
                    break;
                case IEM_OP_PRF_SIZE_REX_W:
                case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
                    pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
                    break;
            }
            break;
        default:
            AssertFailed();
    }
}

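/* Resulting effective operand size for the prefix combinations handled
 * above (in 64-bit mode REX.W wins over 66h; 'def' is the default operand
 * size, see iemRecalEffOpSize64Default below):
 *
 *      mode     none   66h    REX.W   REX.W+66h
 *      16-bit    16     32      -         -
 *      32-bit    32     16      -         -
 *      64-bit   def     16     64        64
 */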

/**
 * Sets the default operand size to 64-bit and recalculates the effective
 * operand size.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
DECLINLINE(void) iemRecalEffOpSize64Default(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    Assert(IEM_IS_64BIT_CODE(pVCpu));
    pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
    if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
        pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
    else
        pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
}


/**
 * Sets the default operand size to 64-bit and recalculates the effective
 * operand size, with intel ignoring any operand size prefix (AMD respects it).
 *
 * This is for the relative jumps.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
DECLINLINE(void) iemRecalEffOpSize64DefaultAndIntelIgnoresOpSizePrefix(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    Assert(IEM_IS_64BIT_CODE(pVCpu));
    pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
    if (   (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP
        || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
        pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
    else
        pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
}

#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */



1646/** @name Register Access.
1647 * @{
1648 */
1649
1650/**
1651 * Gets a reference (pointer) to the specified hidden segment register.
1652 *
1653 * @returns Hidden register reference.
1654 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1655 * @param iSegReg The segment register.
1656 */
1657DECL_FORCE_INLINE(PCPUMSELREG) iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1658{
1659 Assert(iSegReg < X86_SREG_COUNT);
1660 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1661 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1662
1663 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
1664 return pSReg;
1665}
1666
1667
1668/**
1669 * Ensures that the given hidden segment register is up to date.
1670 *
1671 * @returns Hidden register reference.
1672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1673 * @param pSReg The segment register.
1674 */
1675DECL_FORCE_INLINE(PCPUMSELREG) iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg) RT_NOEXCEPT
1676{
1677 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
1678 NOREF(pVCpu);
1679 return pSReg;
1680}
1681
1682
1683/**
1684 * Gets a reference (pointer) to the specified segment register (the selector
1685 * value).
1686 *
1687 * @returns Pointer to the selector variable.
1688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1689 * @param iSegReg The segment register.
1690 */
1691DECL_FORCE_INLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1692{
1693 Assert(iSegReg < X86_SREG_COUNT);
1694 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1695 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
1696}
1697
1698
1699/**
1700 * Fetches the selector value of a segment register.
1701 *
1702 * @returns The selector value.
1703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1704 * @param iSegReg The segment register.
1705 */
1706DECL_FORCE_INLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1707{
1708 Assert(iSegReg < X86_SREG_COUNT);
1709 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1710 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
1711}
1712
1713
1714/**
1715 * Fetches the base address value of a segment register.
1716 *
1717 * @returns The base address value.
1718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1719 * @param iSegReg The segment register.
1720 */
1721DECL_FORCE_INLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1722{
1723 Assert(iSegReg < X86_SREG_COUNT);
1724 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1725 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
1726}
1727
1728
1729/**
1730 * Gets a reference (pointer) to the specified general purpose register.
1731 *
1732 * @returns Register reference.
1733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1734 * @param iReg The general purpose register.
1735 */
1736DECL_FORCE_INLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1737{
1738 Assert(iReg < 16);
1739 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
1740}
1741
1742
1743#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1744/**
1745 * Gets a reference (pointer) to the specified 8-bit general purpose register.
1746 *
1747 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
1748 *
1749 * @returns Register reference.
1750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1751 * @param iReg The register.
1752 */
1753DECL_FORCE_INLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1754{
1755 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
1756 {
1757 Assert(iReg < 16);
1758 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
1759 }
1760 /* high 8-bit register. */
1761 Assert(iReg < 8);
1762 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
1763}
1764#endif
1765
1766
1767/**
1768 * Gets a reference (pointer) to the specified 8-bit general purpose register,
1769 * alternative version with extended (20) register index.
1770 *
1771 * @returns Register reference.
1772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1773 * @param iRegEx The register. The first 16 are the regular ones,
1774 * whereas 16 thru 19 map to AH, CH, DH and BH.
1775 */
1776DECL_FORCE_INLINE(uint8_t *) iemGRegRefU8Ex(PVMCPUCC pVCpu, uint8_t iRegEx) RT_NOEXCEPT
1777{
1778 /** @todo This could be done by double indexing on little endian hosts:
1779 * return &pVCpu->cpum.GstCtx.aGRegs[iRegEx & 15].ab[iRegEx >> 4]; */
1780 if (iRegEx < 16)
1781 return &pVCpu->cpum.GstCtx.aGRegs[iRegEx].u8;
1782
1783 /* high 8-bit register. */
1784 Assert(iRegEx < 20);
1785 return &pVCpu->cpum.GstCtx.aGRegs[iRegEx & 3].bHi;
1786}
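
/*
 * Worked example of the extended index mapping above (hypothetical values):
 *
 *      uint8_t *pbAl = iemGRegRefU8Ex(pVCpu, 0);    // &aGRegs[0].u8  = AL
 *      uint8_t *pbAh = iemGRegRefU8Ex(pVCpu, 16);   // &aGRegs[0].bHi = AH
 *      uint8_t *pbBh = iemGRegRefU8Ex(pVCpu, 19);   // &aGRegs[3].bHi = BH (19 & 3 = 3)
 */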
1787
1788
1789/**
1790 * Gets a reference (pointer) to the specified 16-bit general purpose register.
1791 *
1792 * @returns Register reference.
1793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1794 * @param iReg The register.
1795 */
1796DECL_FORCE_INLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1797{
1798 Assert(iReg < 16);
1799 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
1800}
1801
1802
1803/**
1804 * Gets a reference (pointer) to the specified 32-bit general purpose register.
1805 *
1806 * @returns Register reference.
1807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1808 * @param iReg The register.
1809 */
1810DECL_FORCE_INLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1811{
1812 Assert(iReg < 16);
1813 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1814}
1815
1816
1817/**
1818 * Gets a reference (pointer) to the specified signed 32-bit general purpose register.
1819 *
1820 * @returns Register reference.
1821 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1822 * @param iReg The register.
1823 */
1824DECL_FORCE_INLINE(int32_t *) iemGRegRefI32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1825{
1826 Assert(iReg < 16);
1827 return (int32_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1828}
1829
1830
1831/**
1832 * Gets a reference (pointer) to the specified 64-bit general purpose register.
1833 *
1834 * @returns Register reference.
1835 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1836 * @param iReg The register.
1837 */
1838DECL_FORCE_INLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1839{
1840 Assert(iReg < 16);
1841 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1842}
1843
1844
1845/**
1846 * Gets a reference (pointer) to the specified signed 64-bit general purpose register.
1847 *
1848 * @returns Register reference.
1849 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1850 * @param iReg The register.
1851 */
1852DECL_FORCE_INLINE(int64_t *) iemGRegRefI64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1853{
1854 Assert(iReg < 16);
1855 return (int64_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1856}
1857
1858
1859/**
1860 * Gets a reference (pointer) to the specified segment register's base address.
1861 *
1862 * @returns Segment register base address reference.
1863 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1864 * @param iSegReg The segment selector.
1865 */
1866DECL_FORCE_INLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1867{
1868 Assert(iSegReg < X86_SREG_COUNT);
1869 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1870 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
1871}
1872
1873
1874#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1875/**
1876 * Fetches the value of an 8-bit general purpose register.
1877 *
1878 * @returns The register value.
1879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1880 * @param iReg The register.
1881 */
1882DECL_FORCE_INLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1883{
1884 return *iemGRegRefU8(pVCpu, iReg);
1885}
1886#endif
1887
1888
1889/**
1890 * Fetches the value of an 8-bit general purpose register, alternative version
1891 * with extended (20) register index.
1892 *
1893 * @returns The register value.
1894 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1895 * @param iRegEx The register. The first 16 are the regular ones,
1896 * whereas 16 thru 19 map to AH, CH, DH and BH.
1897 */
1898DECL_FORCE_INLINE(uint8_t) iemGRegFetchU8Ex(PVMCPUCC pVCpu, uint8_t iRegEx) RT_NOEXCEPT
1899{
1900 return *iemGRegRefU8Ex(pVCpu, iRegEx);
1901}
1902
1903
1904/**
1905 * Fetches the value of a 16-bit general purpose register.
1906 *
1907 * @returns The register value.
1908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1909 * @param iReg The register.
1910 */
1911DECL_FORCE_INLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1912{
1913 Assert(iReg < 16);
1914 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
1915}
1916
1917
1918/**
1919 * Fetches the value of a 32-bit general purpose register.
1920 *
1921 * @returns The register value.
1922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1923 * @param iReg The register.
1924 */
1925DECL_FORCE_INLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1926{
1927 Assert(iReg < 16);
1928 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1929}
1930
1931
1932/**
1933 * Fetches the value of a 64-bit general purpose register.
1934 *
1935 * @returns The register value.
1936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1937 * @param iReg The register.
1938 */
1939DECL_FORCE_INLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1940{
1941 Assert(iReg < 16);
1942 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1943}
1944
1945
1946/**
1947 * Stores a 16-bit value to a general purpose register.
1948 *
1949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1950 * @param iReg The register.
1951 * @param uValue The value to store.
1952 */
1953DECL_FORCE_INLINE(void) iemGRegStoreU16(PVMCPUCC pVCpu, uint8_t iReg, uint16_t uValue) RT_NOEXCEPT
1954{
1955 Assert(iReg < 16);
1956 pVCpu->cpum.GstCtx.aGRegs[iReg].u16 = uValue;
1957}
1958
1959
1960/**
1961 * Stores a 32-bit value to a general purpose register, implicitly clearing the
1962 * high bits.
1963 *
1964 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1965 * @param iReg The register.
1966 * @param uValue The value to store.
1967 */
1968DECL_FORCE_INLINE(void) iemGRegStoreU32(PVMCPUCC pVCpu, uint8_t iReg, uint32_t uValue) RT_NOEXCEPT
1969{
1970 Assert(iReg < 16);
1971 pVCpu->cpum.GstCtx.aGRegs[iReg].u64 = uValue;
1972}
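
/*
 * Note the x86-64 zero extension rule at play in iemGRegStoreU32: a 32-bit
 * store writes the whole 64-bit register, clearing bits 63:32, whereas 16-bit
 * and 8-bit stores leave the upper bits untouched.  Worked example with
 * hypothetical register state:
 *
 *      pVCpu->cpum.GstCtx.rax = UINT64_C(0xdeadbeef12345678);
 *      iemGRegStoreU32(pVCpu, X86_GREG_xAX, 0x11223344); // RAX = 0x0000000011223344
 *      iemGRegStoreU16(pVCpu, X86_GREG_xAX, 0x5566);     // RAX = 0x0000000011225566
 */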
1973
1974
1975/**
1976 * Stores a 64-bit value to a general purpose register.
1977 *
1978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1979 * @param iReg The register.
1980 * @param uValue The value to store.
1981 */
1982DECL_FORCE_INLINE(void) iemGRegStoreU64(PVMCPUCC pVCpu, uint8_t iReg, uint64_t uValue) RT_NOEXCEPT
1983{
1984 Assert(iReg < 16);
1985 pVCpu->cpum.GstCtx.aGRegs[iReg].u64 = uValue;
1986}
1987
1988
1989/**
1990 * Get the address of the top of the stack.
1991 *
1992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1993 */
1994DECL_FORCE_INLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu) RT_NOEXCEPT
1995{
1996 if (IEM_IS_64BIT_CODE(pVCpu))
1997 return pVCpu->cpum.GstCtx.rsp;
1998 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1999 return pVCpu->cpum.GstCtx.esp;
2000 return pVCpu->cpum.GstCtx.sp;
2001}
2002
2003
2004/**
2005 * Updates the RIP/EIP/IP to point to the next instruction.
2006 *
2007 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2008 * @param cbInstr The number of bytes to add.
2009 */
2010DECL_FORCE_INLINE(void) iemRegAddToRip(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2011{
2012 /*
2013 * Advance RIP.
2014 *
2015 * When we're targeting 8086/8, 80186/8 or 80286 mode the updates are 16-bit,
2016 * while in all other modes except LM64 the updates are 32-bit. This means
2017 * we need to watch for both 32-bit and 16-bit "carry" situations, i.e.
2018 * 4GB and 64KB rollovers, and decide whether anything needs masking.
2019 *
2020 * See PC wrap around tests in bs3-cpu-weird-1.
2021 */
2022 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
2023 uint64_t const uRipNext = uRipPrev + cbInstr;
2024 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & (RT_BIT_64(32) | RT_BIT_64(16)))
2025 || IEM_IS_64BIT_CODE(pVCpu)))
2026 pVCpu->cpum.GstCtx.rip = uRipNext;
2027 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
2028 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
2029 else
2030 pVCpu->cpum.GstCtx.rip = (uint16_t)uRipNext;
2031}
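
/*
 * Worked example of the rollover test above (hypothetical values): with
 * uRipPrev = 0xffff and cbInstr = 2, uRipNext = 0x10001 and the XOR yields
 * 0x1fffe, so bit 16 is set and the masking path is taken.  On a pre-386
 * target IP wraps to (uint16_t)0x10001 = 0x0001, while on a 386+ target
 * outside 64-bit mode only the 4GB carry is masked and EIP simply becomes
 * 0x10001.
 */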
2032
2033
2034/**
2035 * Called by iemRegAddToRipAndFinishingClearingRF and others when any of the
2036 * following EFLAGS bits are set:
2037 * - X86_EFL_RF - clear it.
2038 * - CPUMCTX_INHIBIT_SHADOW (_SS/_STI) - clear them.
2039 * - X86_EFL_TF - generate single step \#DB trap.
2040 * - CPUMCTX_DBG_HIT_DR0/1/2/3 - generate \#DB trap (data or I/O, not
2041 * instruction).
2042 *
2043 * According to @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events},
2044 * a \#DB due to TF (single stepping) or a DRx non-instruction breakpoint
2045 * takes priority over both NMIs and hardware interrupts. So, neither is
2046 * considered here. (The RESET, \#MC, SMI, INIT, STOPCLK and FLUSH events are
2047 * either unsupported or will be triggered on top of any \#DB raised here.)
2048 *
2049 * The RF flag only needs to be cleared here as it only suppresses instruction
2050 * breakpoints which are not raised here (happens synchronously during
2051 * instruction fetching).
2052 *
2053 * The CPUMCTX_INHIBIT_SHADOW_SS flag will be cleared by this function, so its
2054 * status has no bearing on whether \#DB exceptions are raised.
2055 *
2056 * @note This must *NOT* be called by the two instructions setting the
2057 * CPUMCTX_INHIBIT_SHADOW_SS flag.
2058 *
2059 * @see @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events}
2060 * @see @sdmv3{077,200,6.8.3,Masking Exceptions and Interrupts When Switching
2061 * Stacks}
2062 */
2063static VBOXSTRICTRC iemFinishInstructionWithFlagsSet(PVMCPUCC pVCpu) RT_NOEXCEPT
2064{
2065 /*
2066 * Normally we're just here to clear RF and/or interrupt shadow bits.
2067 */
2068 if (RT_LIKELY((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) == 0))
2069 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
2070 else
2071 {
2072 /*
2073 * Raise a #DB and/or DBGF event.
2074 */
2075 VBOXSTRICTRC rcStrict;
2076 if (pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK))
2077 {
2078 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2079 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
2080 if (pVCpu->cpum.GstCtx.eflags.uBoth & X86_EFL_TF)
2081 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS;
2082 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK) >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2083 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64\n",
2084 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
2085 pVCpu->cpum.GstCtx.rflags.uBoth));
2086
2087 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK);
2088 rcStrict = iemRaiseDebugException(pVCpu);
2089
2090 /* A DBGF event/breakpoint trumps the iemRaiseDebugException informational status code. */
2091 if ((pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK) && RT_FAILURE(rcStrict))
2092 {
2093 rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
2094 LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
2095 }
2096 }
2097 else
2098 {
2099 Assert(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK);
2100 rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
2101 LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
2102 }
2103 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_DBG_DBGF_MASK;
2104 return rcStrict;
2105 }
2106 return VINF_SUCCESS;
2107}
2108
2109
2110/**
2111 * Clears the RF and CPUMCTX_INHIBIT_SHADOW, triggering \#DB if pending.
2112 *
2113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2114 */
2115DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegFinishClearingRF(PVMCPUCC pVCpu) RT_NOEXCEPT
2116{
2117 /*
2118 * We assume that most of the time nothing actually needs doing here.
2119 */
2120 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
2121 if (RT_LIKELY(!( pVCpu->cpum.GstCtx.eflags.uBoth
2122 & (X86_EFL_TF | X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) ))
2123 return VINF_SUCCESS;
2124 return iemFinishInstructionWithFlagsSet(pVCpu);
2125}
2126
2127
2128/**
2129 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF
2130 * and CPUMCTX_INHIBIT_SHADOW.
2131 *
2132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2133 * @param cbInstr The number of bytes to add.
2134 */
2135DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2136{
2137 iemRegAddToRip(pVCpu, cbInstr);
2138 return iemRegFinishClearingRF(pVCpu);
2139}
2140
2141
2142/**
2143 * Updates the RIP to point to the next instruction and clears EFLAGS.RF
2144 * and CPUMCTX_INHIBIT_SHADOW.
2145 *
2146 * Only called from 64-bit code.
2147 *
2148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2149 * @param cbInstr The number of bytes to add.
2150 */
2151DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRip64AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2152{
2153 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rip + cbInstr;
2154 return iemRegFinishClearingRF(pVCpu);
2155}
2156
2157
2158/**
2159 * Updates the EIP to point to the next instruction and clears EFLAGS.RF and
2160 * CPUMCTX_INHIBIT_SHADOW.
2161 *
2162 * This is never called from 64-bit code.
2163 *
2164 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2165 * @param cbInstr The number of bytes to add.
2166 */
2167DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToEip32AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2168{
2169 pVCpu->cpum.GstCtx.rip = (uint32_t)(pVCpu->cpum.GstCtx.eip + cbInstr);
2170 return iemRegFinishClearingRF(pVCpu);
2171}
2172
2173
2174/**
2175 * Updates the IP to point to the next instruction and clears EFLAGS.RF and
2176 * CPUMCTX_INHIBIT_SHADOW.
2177 *
2178 * This is only ever used from 16-bit code on a pre-386 CPU.
2179 *
2180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2181 * @param cbInstr The number of bytes to add.
2182 */
2183DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToIp16AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2184{
2185 pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr);
2186 return iemRegFinishClearingRF(pVCpu);
2187}
2188
2189
2190/**
2191 * Tail method for a finish function that doesn't clear flags or raise \#DB.
2192 *
2193 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2194 */
2195DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegFinishNoFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
2196{
2197 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
2198 Assert(!( pVCpu->cpum.GstCtx.eflags.uBoth
2199 & (X86_EFL_TF | X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) );
2200 RT_NOREF(pVCpu);
2201 return VINF_SUCCESS;
2202}
2203
2204
2205/**
2206 * Updates the RIP to point to the next instruction, but does not need to clear
2207 * EFLAGS.RF or CPUMCTX_INHIBIT_SHADOW nor check for debug flags.
2208 *
2209 * Only called from 64-bit code.
2210 *
2211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2212 * @param cbInstr The number of bytes to add.
2213 */
2214DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRip64AndFinishingNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2215{
2216 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rip + cbInstr;
2217 return iemRegFinishNoFlags(pVCpu);
2218}
2219
2220
2221/**
2222 * Updates the EIP to point to the next instruction, but does not need to clear
2223 * EFLAGS.RF or CPUMCTX_INHIBIT_SHADOW nor check for debug flags.
2224 *
2225 * This is never called from 64-bit code.
2226 *
2227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2228 * @param cbInstr The number of bytes to add.
2229 */
2230DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToEip32AndFinishingNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2231{
2232 pVCpu->cpum.GstCtx.rip = (uint32_t)(pVCpu->cpum.GstCtx.eip + cbInstr);
2233 return iemRegFinishNoFlags(pVCpu);
2234}
2235
2236
2237/**
2238 * Updates the IP to point to the next instruction, but does not need to clear
2239 * EFLAGS.RF or CPUMCTX_INHIBIT_SHADOW nor check for debug flags.
2240 *
2241 * This is only ever used from 16-bit code on a pre-386 CPU.
2242 *
2243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2244 * @param cbInstr The number of bytes to add.
2245 */
2246DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToIp16AndFinishingNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2247{
2248 pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr);
2249 return iemRegFinishNoFlags(pVCpu);
2250}
2251
2252
2253/**
2254 * Adds an 8-bit signed jump offset to RIP from 64-bit code.
2255 *
2256 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2257 * segment limit.
2258 *
2259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2260 * @param cbInstr Instruction size.
2261 * @param offNextInstr The offset of the next instruction.
2262 * @param enmEffOpSize Effective operand size.
2263 */
2264DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2265 IEMMODE enmEffOpSize) RT_NOEXCEPT
2266{
2267 Assert(IEM_IS_64BIT_CODE(pVCpu));
2268 Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);
2269
2270 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2271 if (enmEffOpSize == IEMMODE_16BIT)
2272 uNewRip &= UINT16_MAX;
2273
2274 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2275 pVCpu->cpum.GstCtx.rip = uNewRip;
2276 else
2277 return iemRaiseGeneralProtectionFault0(pVCpu);
2278
2279#ifndef IEM_WITH_CODE_TLB
2280 iemOpcodeFlushLight(pVCpu, cbInstr);
2281#endif
2282
2283 /*
2284 * Clear RF and finish the instruction (maybe raise #DB).
2285 */
2286 return iemRegFinishClearingRF(pVCpu);
2287}
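
/*
 * Worked example of the canonical check above (hypothetical values): with
 * rip = 0x00007ffffffffffe, cbInstr = 2 and offNextInstr = +1, the new RIP
 * would be 0x0000800000000001.  Bit 47 is set but bits 63:48 are clear, so
 * IEM_IS_CANONICAL() fails and #GP(0) is raised.  With a 16-bit effective
 * operand size the result is masked to 16 bits first and always passes.
 */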
2288
2289
2290/**
2291 * Adds an 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
2292 * code (never 64-bit).
2293 *
2294 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2295 * segment limit.
2296 *
2297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2298 * @param cbInstr Instruction size.
2299 * @param offNextInstr The offset of the next instruction.
2300 * @param enmEffOpSize Effective operand size.
2301 */
2302DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2303 IEMMODE enmEffOpSize) RT_NOEXCEPT
2304{
2305 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2306 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2307
2308 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
2309 if (enmEffOpSize == IEMMODE_16BIT)
2310 uNewEip &= UINT16_MAX;
2311 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2312 pVCpu->cpum.GstCtx.rip = uNewEip;
2313 else
2314 return iemRaiseGeneralProtectionFault0(pVCpu);
2315
2316#ifndef IEM_WITH_CODE_TLB
2317 iemOpcodeFlushLight(pVCpu, cbInstr);
2318#endif
2319
2320 /*
2321 * Clear RF and finish the instruction (maybe raise #DB).
2322 */
2323 return iemRegFinishClearingRF(pVCpu);
2324}
2325
2326
2327/**
2328 * Adds an 8-bit signed jump offset to IP, on a pre-386 CPU.
2329 *
2330 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2331 * segment limit.
2332 *
2333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2334 * @param cbInstr Instruction size.
2335 * @param offNextInstr The offset of the next instruction.
2336 */
2337DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegIp16RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2338 int8_t offNextInstr) RT_NOEXCEPT
2339{
2340 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2341
2342 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
2343 if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
2344 pVCpu->cpum.GstCtx.rip = uNewIp;
2345 else
2346 return iemRaiseGeneralProtectionFault0(pVCpu);
2347
2348#ifndef IEM_WITH_CODE_TLB
2349 iemOpcodeFlushLight(pVCpu, cbInstr);
2350#endif
2351
2352 /*
2353 * Clear RF and finish the instruction (maybe raise #DB).
2354 */
2355 return iemRegFinishClearingRF(pVCpu);
2356}
2357
2358
2359/**
2360 * Adds an 8-bit signed jump offset to RIP from 64-bit code, no checking or
2361 * clearing of flags.
2362 *
2363 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2364 * segment limit.
2365 *
2366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2367 * @param cbInstr Instruction size.
2368 * @param offNextInstr The offset of the next instruction.
2369 * @param enmEffOpSize Effective operand size.
2370 */
2371DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2372 IEMMODE enmEffOpSize) RT_NOEXCEPT
2373{
2374 Assert(IEM_IS_64BIT_CODE(pVCpu));
2375 Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);
2376
2377 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2378 if (enmEffOpSize == IEMMODE_16BIT)
2379 uNewRip &= UINT16_MAX;
2380
2381 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2382 pVCpu->cpum.GstCtx.rip = uNewRip;
2383 else
2384 return iemRaiseGeneralProtectionFault0(pVCpu);
2385
2386#ifndef IEM_WITH_CODE_TLB
2387 iemOpcodeFlushLight(pVCpu, cbInstr);
2388#endif
2389 return iemRegFinishNoFlags(pVCpu);
2390}
2391
2392
2393/**
2394 * Adds an 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
2395 * code (never 64-bit), no checking or clearing of flags.
2396 *
2397 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2398 * segment limit.
2399 *
2400 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2401 * @param cbInstr Instruction size.
2402 * @param offNextInstr The offset of the next instruction.
2403 * @param enmEffOpSize Effective operand size.
2404 */
2405DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2406 IEMMODE enmEffOpSize) RT_NOEXCEPT
2407{
2408 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2409 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2410
2411 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
2412 if (enmEffOpSize == IEMMODE_16BIT)
2413 uNewEip &= UINT16_MAX;
2414 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2415 pVCpu->cpum.GstCtx.rip = uNewEip;
2416 else
2417 return iemRaiseGeneralProtectionFault0(pVCpu);
2418
2419#ifndef IEM_WITH_CODE_TLB
2420 iemOpcodeFlushLight(pVCpu, cbInstr);
2421#endif
2422 return iemRegFinishNoFlags(pVCpu);
2423}
2424
2425
2426/**
2427 * Adds an 8-bit signed jump offset to IP, on a pre-386 CPU, no checking or
2428 * clearing of flags.
2429 *
2430 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2431 * segment limit.
2432 *
2433 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2434 * @param cbInstr Instruction size.
2435 * @param offNextInstr The offset of the next instruction.
2436 */
2437DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegIp16RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
2438 int8_t offNextInstr) RT_NOEXCEPT
2439{
2440 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2441
2442 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
2443 if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
2444 pVCpu->cpum.GstCtx.rip = uNewIp;
2445 else
2446 return iemRaiseGeneralProtectionFault0(pVCpu);
2447
2448#ifndef IEM_WITH_CODE_TLB
2449 iemOpcodeFlushLight(pVCpu, cbInstr);
2450#endif
2451 return iemRegFinishNoFlags(pVCpu);
2452}
2453
2454
2455/**
2456 * Adds a 16-bit signed jump offset to RIP from 64-bit code.
2457 *
2458 * @returns Strict VBox status code.
2459 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2460 * @param cbInstr Instruction size.
2461 * @param offNextInstr The offset of the next instruction.
2462 */
2463DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2464 int16_t offNextInstr) RT_NOEXCEPT
2465{
2466 Assert(IEM_IS_64BIT_CODE(pVCpu));
2467
2468 pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr);
2469
2470#ifndef IEM_WITH_CODE_TLB
2471 iemOpcodeFlushLight(pVCpu, cbInstr);
2472#endif
2473
2474 /*
2475 * Clear RF and finish the instruction (maybe raise #DB).
2476 */
2477 return iemRegFinishClearingRF(pVCpu);
2478}
2479
2480
2481/**
2482 * Adds a 16-bit signed jump offset to EIP from 16-bit or 32-bit code.
2483 *
2484 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2485 * segment limit.
2486 *
2487 * @returns Strict VBox status code.
2488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2489 * @param cbInstr Instruction size.
2490 * @param offNextInstr The offset of the next instruction.
2491 *
2492 * @note This is also used by 16-bit code in pre-386 mode, as the code is
2493 * identical.
2494 */
2495DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2496 int16_t offNextInstr) RT_NOEXCEPT
2497{
2498 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2499
2500 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
2501 if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
2502 pVCpu->cpum.GstCtx.rip = uNewIp;
2503 else
2504 return iemRaiseGeneralProtectionFault0(pVCpu);
2505
2506#ifndef IEM_WITH_CODE_TLB
2507 iemOpcodeFlushLight(pVCpu, cbInstr);
2508#endif
2509
2510 /*
2511 * Clear RF and finish the instruction (maybe raise #DB).
2512 */
2513 return iemRegFinishClearingRF(pVCpu);
2514}
2515
2516
2517/**
2518 * Adds a 16-bit signed jump offset to RIP from 64-bit code, no checking or
2519 * clearing of flags.
2520 *
2521 * @returns Strict VBox status code.
2522 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2523 * @param cbInstr Instruction size.
2524 * @param offNextInstr The offset of the next instruction.
2525 */
2526DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
2527 int16_t offNextInstr) RT_NOEXCEPT
2528{
2529 Assert(IEM_IS_64BIT_CODE(pVCpu));
2530
2531 pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr);
2532
2533#ifndef IEM_WITH_CODE_TLB
2534 iemOpcodeFlushLight(pVCpu, cbInstr);
2535#endif
2536 return iemRegFinishNoFlags(pVCpu);
2537}
2538
2539
2540/**
2541 * Adds a 16-bit signed jump offset to EIP from 16-bit or 32-bit code,
2542 * no checking or clearing of flags.
2543 *
2544 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2545 * segment limit.
2546 *
2547 * @returns Strict VBox status code.
2548 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2549 * @param cbInstr Instruction size.
2550 * @param offNextInstr The offset of the next instruction.
2551 *
2552 * @note This is also used by 16-bit code in pre-386 mode, as the code is
2553 * identical.
2554 */
2555DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
2556 int16_t offNextInstr) RT_NOEXCEPT
2557{
2558 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2559
2560 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
2561 if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
2562 pVCpu->cpum.GstCtx.rip = uNewIp;
2563 else
2564 return iemRaiseGeneralProtectionFault0(pVCpu);
2565
2566#ifndef IEM_WITH_CODE_TLB
2567 iemOpcodeFlushLight(pVCpu, cbInstr);
2568#endif
2569 return iemRegFinishNoFlags(pVCpu);
2570}
2571
2572
2573/**
2574 * Adds a 32-bit signed jump offset to RIP from 64-bit code.
2575 *
2576 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2577 * segment limit.
2578 *
2579 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
2580 * only alternative for relative jumps in 64-bit code and that is already
2581 * handled in the decoder stage.
2582 *
2583 * @returns Strict VBox status code.
2584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2585 * @param cbInstr Instruction size.
2586 * @param offNextInstr The offset of the next instruction.
2587 */
2588DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2589 int32_t offNextInstr) RT_NOEXCEPT
2590{
2591 Assert(IEM_IS_64BIT_CODE(pVCpu));
2592
2593 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2594 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2595 pVCpu->cpum.GstCtx.rip = uNewRip;
2596 else
2597 return iemRaiseGeneralProtectionFault0(pVCpu);
2598
2599#ifndef IEM_WITH_CODE_TLB
2600 iemOpcodeFlushLight(pVCpu, cbInstr);
2601#endif
2602
2603 /*
2604 * Clear RF and finish the instruction (maybe raise #DB).
2605 */
2606 return iemRegFinishClearingRF(pVCpu);
2607}
2608
2609
2610/**
2611 * Adds a 32-bit signed jump offset to RIP from 64-bit code.
2612 *
2613 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2614 * segment limit.
2615 *
2616 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
2617 * only alternative for relative jumps in 32-bit code and that is already
2618 * handled in the decoder stage.
2619 *
2620 * @returns Strict VBox status code.
2621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2622 * @param cbInstr Instruction size.
2623 * @param offNextInstr The offset of the next instruction.
2624 */
2625DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2626 int32_t offNextInstr) RT_NOEXCEPT
2627{
2628 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2629 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
2630
2631 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
2632 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2633 pVCpu->cpum.GstCtx.rip = uNewEip;
2634 else
2635 return iemRaiseGeneralProtectionFault0(pVCpu);
2636
2637#ifndef IEM_WITH_CODE_TLB
2638 iemOpcodeFlushLight(pVCpu, cbInstr);
2639#endif
2640
2641 /*
2642 * Clear RF and finish the instruction (maybe raise #DB).
2643 */
2644 return iemRegFinishClearingRF(pVCpu);
2645}
2646
2647
2648/**
2649 * Adds a 32-bit signed jump offset to RIP from 64-bit code, no checking or
2650 * clearing of flags.
2651 *
2652 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2653 * segment limit.
2654 *
2655 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
2656 * only alternative for relative jumps in 64-bit code and that is already
2657 * handled in the decoder stage.
2658 *
2659 * @returns Strict VBox status code.
2660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2661 * @param cbInstr Instruction size.
2662 * @param offNextInstr The offset of the next instruction.
2663 */
2664DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
2665 int32_t offNextInstr) RT_NOEXCEPT
2666{
2667 Assert(IEM_IS_64BIT_CODE(pVCpu));
2668
2669 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2670 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2671 pVCpu->cpum.GstCtx.rip = uNewRip;
2672 else
2673 return iemRaiseGeneralProtectionFault0(pVCpu);
2674
2675#ifndef IEM_WITH_CODE_TLB
2676 iemOpcodeFlushLight(pVCpu, cbInstr);
2677#endif
2678 return iemRegFinishNoFlags(pVCpu);
2679}
2680
2681
2682/**
2683 * Adds a 32-bit signed jump offset to RIP from 64-bit code, no checking or
2684 * clearing of flags.
2685 *
2686 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2687 * segment limit.
2688 *
2689 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
2690 * only alternative for relative jumps in 32-bit code and that is already
2691 * handled in the decoder stage.
2692 *
2693 * @returns Strict VBox status code.
2694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2695 * @param cbInstr Instruction size.
2696 * @param offNextInstr The offset of the next instruction.
2697 */
2698DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
2699 int32_t offNextInstr) RT_NOEXCEPT
2700{
2701 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2702 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
2703
2704 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
2705 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2706 pVCpu->cpum.GstCtx.rip = uNewEip;
2707 else
2708 return iemRaiseGeneralProtectionFault0(pVCpu);
2709
2710#ifndef IEM_WITH_CODE_TLB
2711 iemOpcodeFlushLight(pVCpu, cbInstr);
2712#endif
2713 return iemRegFinishNoFlags(pVCpu);
2714}
2715
2716
2717/**
2718 * Extended version of iemFinishInstructionWithFlagsSet that goes with
2719 * iemRegAddToRipAndFinishingClearingRfEx.
2720 *
2721 * See iemFinishInstructionWithFlagsSet() for details.
2722 */
2723static VBOXSTRICTRC iemFinishInstructionWithTfSet(PVMCPUCC pVCpu) RT_NOEXCEPT
2724{
2725 /*
2726 * Raise a #DB.
2727 */
2728 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2729 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
2730 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS
2731 | (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK) >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2732 /** @todo Do we set all pending \#DB events, or just one? */
2733 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64 (popf)\n",
2734 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
2735 pVCpu->cpum.GstCtx.rflags.uBoth));
2736 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
2737 return iemRaiseDebugException(pVCpu);
2738}
2739
2740
2741/**
2742 * Extended version of iemRegAddToRipAndFinishingClearingRF for use by POPF and
2743 * others potentially updating EFLAGS.TF.
2744 *
2745 * The single step event must be generated using the TF value at the start of
2746 * the instruction, not the new value set by it.
2747 *
2748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2749 * @param cbInstr The number of bytes to add.
2750 * @param fEflOld The EFLAGS at the start of the instruction
2751 * execution.
2752 */
2753DECLINLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRfEx(PVMCPUCC pVCpu, uint8_t cbInstr, uint32_t fEflOld) RT_NOEXCEPT
2754{
2755 iemRegAddToRip(pVCpu, cbInstr);
2756 if (!(fEflOld & X86_EFL_TF))
2757 return iemRegFinishClearingRF(pVCpu);
2758 return iemFinishInstructionWithTfSet(pVCpu);
2759}
2760
2761
2762#ifndef IEM_WITH_OPAQUE_DECODER_STATE
2763/**
2764 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
2765 *
2766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2767 */
2768DECLINLINE(VBOXSTRICTRC) iemRegUpdateRipAndFinishClearingRF(PVMCPUCC pVCpu) RT_NOEXCEPT
2769{
2770 return iemRegAddToRipAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
2771}
2772#endif
2773
2774
2775/**
2776 * Adds to the stack pointer.
2777 *
2778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2779 * @param cbToAdd The number of bytes to add (8-bit!).
2780 */
2781DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd) RT_NOEXCEPT
2782{
2783 if (IEM_IS_64BIT_CODE(pVCpu))
2784 pVCpu->cpum.GstCtx.rsp += cbToAdd;
2785 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2786 pVCpu->cpum.GstCtx.esp += cbToAdd;
2787 else
2788 pVCpu->cpum.GstCtx.sp += cbToAdd;
2789}
2790
2791
2792/**
2793 * Subtracts from the stack pointer.
2794 *
2795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2796 * @param cbToSub The number of bytes to subtract (8-bit!).
2797 */
2798DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub) RT_NOEXCEPT
2799{
2800 if (IEM_IS_64BIT_CODE(pVCpu))
2801 pVCpu->cpum.GstCtx.rsp -= cbToSub;
2802 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2803 pVCpu->cpum.GstCtx.esp -= cbToSub;
2804 else
2805 pVCpu->cpum.GstCtx.sp -= cbToSub;
2806}
2807
2808
2809/**
2810 * Adds to the temporary stack pointer.
2811 *
2812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2813 * @param pTmpRsp The temporary SP/ESP/RSP to update.
2814 * @param cbToAdd The number of bytes to add (16-bit).
2815 */
2816DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd) RT_NOEXCEPT
2817{
2818 if (IEM_IS_64BIT_CODE(pVCpu))
2819 pTmpRsp->u += cbToAdd;
2820 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2821 pTmpRsp->DWords.dw0 += cbToAdd;
2822 else
2823 pTmpRsp->Words.w0 += cbToAdd;
2824}
2825
2826
2827/**
2828 * Subtracts from the temporary stack pointer.
2829 *
2830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2831 * @param pTmpRsp The temporary SP/ESP/RSP to update.
2832 * @param cbToSub The number of bytes to subtract.
2833 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
2834 * expecting that.
2835 */
2836DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub) RT_NOEXCEPT
2837{
2838 if (IEM_IS_64BIT_CODE(pVCpu))
2839 pTmpRsp->u -= cbToSub;
2840 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2841 pTmpRsp->DWords.dw0 -= cbToSub;
2842 else
2843 pTmpRsp->Words.w0 -= cbToSub;
2844}
2845
2846
2847/**
2848 * Calculates the effective stack address for a push of the specified size as
2849 * well as the new RSP value (upper bits may be masked).
2850 *
2851 * @returns Effective stack address for the push.
2852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2853 * @param cbItem The size of the stack item to push.
2854 * @param puNewRsp Where to return the new RSP value.
2855 */
2856DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
2857{
2858 RTUINT64U uTmpRsp;
2859 RTGCPTR GCPtrTop;
2860 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
2861
2862 if (IEM_IS_64BIT_CODE(pVCpu))
2863 GCPtrTop = uTmpRsp.u -= cbItem;
2864 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2865 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
2866 else
2867 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
2868 *puNewRsp = uTmpRsp.u;
2869 return GCPtrTop;
2870}
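
/*
 * Worked example (hypothetical state): with a 32-bit stack (SS.Attr.n.u1DefBig
 * set), ESP = 0x1000 and an 8 byte push, this returns GCPtrTop = 0xff8 and
 * sets the low dword of *puNewRsp to 0xff8, leaving the upper RSP bits as
 * they were.  With a 16-bit stack only SP (Words.w0) is decremented.
 */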
2871
2872
2873/**
2874 * Gets the current stack pointer and calculates the value after a pop of the
2875 * specified size.
2876 *
2877 * @returns Current stack pointer.
2878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2879 * @param cbItem The size of the stack item to pop.
2880 * @param puNewRsp Where to return the new RSP value.
2881 */
2882DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
2883{
2884 RTUINT64U uTmpRsp;
2885 RTGCPTR GCPtrTop;
2886 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
2887
2888 if (IEM_IS_64BIT_CODE(pVCpu))
2889 {
2890 GCPtrTop = uTmpRsp.u;
2891 uTmpRsp.u += cbItem;
2892 }
2893 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2894 {
2895 GCPtrTop = uTmpRsp.DWords.dw0;
2896 uTmpRsp.DWords.dw0 += cbItem;
2897 }
2898 else
2899 {
2900 GCPtrTop = uTmpRsp.Words.w0;
2901 uTmpRsp.Words.w0 += cbItem;
2902 }
2903 *puNewRsp = uTmpRsp.u;
2904 return GCPtrTop;
2905}
2906
2907
2908/**
2909 * Calculates the effective stack address for a push of the specified size as
2910 * well as the new temporary RSP value (upper bits may be masked).
2911 *
2912 * @returns Effective stack address for the push.
2913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2914 * @param pTmpRsp The temporary stack pointer. This is updated.
2915 * @param cbItem The size of the stack item to push.
2916 */
2917DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
2918{
2919 RTGCPTR GCPtrTop;
2920
2921 if (IEM_IS_64BIT_CODE(pVCpu))
2922 GCPtrTop = pTmpRsp->u -= cbItem;
2923 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2924 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
2925 else
2926 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
2927 return GCPtrTop;
2928}
2929
2930
2931/**
2932 * Gets the effective stack address for a pop of the specified size and
2933 * calculates and updates the temporary RSP.
2934 *
2935 * @returns Current stack pointer.
2936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2937 * @param pTmpRsp The temporary stack pointer. This is updated.
2938 * @param cbItem The size of the stack item to pop.
2939 */
2940DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
2941{
2942 RTGCPTR GCPtrTop;
2943 if (IEM_IS_64BIT_CODE(pVCpu))
2944 {
2945 GCPtrTop = pTmpRsp->u;
2946 pTmpRsp->u += cbItem;
2947 }
2948 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2949 {
2950 GCPtrTop = pTmpRsp->DWords.dw0;
2951 pTmpRsp->DWords.dw0 += cbItem;
2952 }
2953 else
2954 {
2955 GCPtrTop = pTmpRsp->Words.w0;
2956 pTmpRsp->Words.w0 += cbItem;
2957 }
2958 return GCPtrTop;
2959}
2960
2961/** @} */
2962
2963
2964/** @name FPU access and helpers.
2965 *
2966 * @{
2967 */
2968
2969
2970/**
2971 * Hook for preparing to use the host FPU.
2972 *
2973 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2974 *
2975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2976 */
2977DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu) RT_NOEXCEPT
2978{
2979#ifdef IN_RING3
2980 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2981#else
2982 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
2983#endif
2984 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2985}
2986
2987
2988/**
2989 * Hook for preparing to use the host FPU for SSE.
2990 *
2991 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2992 *
2993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2994 */
2995DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu) RT_NOEXCEPT
2996{
2997 iemFpuPrepareUsage(pVCpu);
2998}
2999
3000
3001/**
3002 * Hook for preparing to use the host FPU for AVX.
3003 *
3004 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3005 *
3006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3007 */
3008DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu) RT_NOEXCEPT
3009{
3010 iemFpuPrepareUsage(pVCpu);
3011}
3012
3013
3014/**
3015 * Hook for actualizing the guest FPU state before the interpreter reads it.
3016 *
3017 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3018 *
3019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3020 */
3021DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
3022{
3023#ifdef IN_RING3
3024 NOREF(pVCpu);
3025#else
3026 CPUMRZFpuStateActualizeForRead(pVCpu);
3027#endif
3028 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
3029}
3030
3031
3032/**
3033 * Hook for actualizing the guest FPU state before the interpreter changes it.
3034 *
3035 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3036 *
3037 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3038 */
3039DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
3040{
3041#ifdef IN_RING3
3042 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
3043#else
3044 CPUMRZFpuStateActualizeForChange(pVCpu);
3045#endif
3046 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
3047}
3048
3049
3050/**
3051 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
3052 * only.
3053 *
3054 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3055 *
3056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3057 */
3058DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
3059{
3060#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
3061 NOREF(pVCpu);
3062#else
3063 CPUMRZFpuStateActualizeSseForRead(pVCpu);
3064#endif
3065 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
3066}
3067
3068
3069/**
3070 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
3071 * read+write.
3072 *
3073 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3074 *
3075 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3076 */
3077DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
3078{
3079#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
3080 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
3081#else
3082 CPUMRZFpuStateActualizeForChange(pVCpu);
3083#endif
3084 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
3085
3086 /* Make sure any changes are loaded the next time around. */
3087 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_SSE;
3088}
3089
3090
3091/**
3092 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
3093 * only.
3094 *
3095 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3096 *
3097 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3098 */
3099DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
3100{
3101#ifdef IN_RING3
3102 NOREF(pVCpu);
3103#else
3104 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
3105#endif
3106 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
3107}
3108
3109
3110/**
3111 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
3112 * read+write.
3113 *
3114 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3115 *
3116 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3117 */
3118DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
3119{
3120#ifdef IN_RING3
3121 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
3122#else
3123 CPUMRZFpuStateActualizeForChange(pVCpu);
3124#endif
3125 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
3126
3127 /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
3128 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
3129}
3130
3131
3132/**
3133 * Stores a QNaN value into a FPU register.
3134 *
3135 * @param pReg Pointer to the register.
3136 */
3137DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg) RT_NOEXCEPT
3138{
3139 pReg->au32[0] = UINT32_C(0x00000000);
3140 pReg->au32[1] = UINT32_C(0xc0000000);
3141 pReg->au16[4] = UINT16_C(0xffff);
3142}
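
/*
 * The pattern stored above is the x87 "real indefinite" QNaN, i.e. the value
 * a masked invalid-operation exception produces: sign = 1, exponent = 0x7fff,
 * mantissa = 0xc000000000000000.  Minimal usage sketch:
 *
 *      RTFLOAT80U r80Result;
 *      iemFpuStoreQNan(&r80Result);    // r80Result = ffff:c000000000000000
 */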
3143
3144
3145/**
3146 * Updates the FOP, FPU.CS and FPUIP registers, extended version.
3147 *
3148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3149 * @param pFpuCtx The FPU context.
3150 * @param uFpuOpcode The FPU opcode value (see IEMCPU::uFpuOpcode).
3151 */
3152DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorkerEx(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint16_t uFpuOpcode) RT_NOEXCEPT
3153{
3154 Assert(uFpuOpcode != UINT16_MAX);
3155 pFpuCtx->FOP = uFpuOpcode;
3156 /** @todo x87.CS and FPUIP need to be kept separately. */
3157 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
3158 {
3159 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
3160 * happens in real mode here based on the fnsave and fnstenv images. */
3161 pFpuCtx->CS = 0;
3162 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
3163 }
3164 else if (!IEM_IS_LONG_MODE(pVCpu))
3165 {
3166 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
3167 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
3168 }
3169 else
3170 *(uint64_t *)&pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
3171}
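
/*
 * Worked example of the real/V86 mode branch above (hypothetical values):
 * with CS = 0x1234 and EIP = 0x0010 the recorded FPUIP becomes
 * 0x0010 | (0x1234 << 4) = 0x12350, i.e. the linear address, matching the
 * instruction pointer format of the real-mode FNSTENV/FNSAVE images.
 */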
3172
3173
3174/**
3175 * Marks the specified stack register as free (for FFREE).
3176 *
3177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3178 * @param iStReg The register to free.
3179 */
3180DECLINLINE(void) iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
3181{
3182 Assert(iStReg < 8);
3183 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3184 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
3185 pFpuCtx->FTW &= ~RT_BIT(iReg);
3186}
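
/*
 * Worked example (hypothetical state): with FSW.TOP = 6, ST(3) lives in
 * physical register (6 + 3) & 7 = 1, so freeing it clears bit 1 of FTW:
 *
 *      iemFpuStackFree(pVCpu, 3);    // FTW &= ~RT_BIT(1) when TOP is 6
 */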
3187
3188
3189/**
3190 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
3191 *
3192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3193 */
3194DECLINLINE(void) iemFpuStackIncTop(PVMCPUCC pVCpu) RT_NOEXCEPT
3195{
3196 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3197 uint16_t uFsw = pFpuCtx->FSW;
3198 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
3199 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
3200 uFsw &= ~X86_FSW_TOP_MASK;
3201 uFsw |= uTop;
3202 pFpuCtx->FSW = uFsw;
3203}
3204
3205
3206/**
3207 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
3208 *
3209 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3210 */
3211DECLINLINE(void) iemFpuStackDecTop(PVMCPUCC pVCpu) RT_NOEXCEPT
3212{
3213 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3214 uint16_t uFsw = pFpuCtx->FSW;
3215 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
3216 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
3217 uFsw &= ~X86_FSW_TOP_MASK;
3218 uFsw |= uTop;
3219 pFpuCtx->FSW = uFsw;
3220}
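
/*
 * Note how the decrement above is done by adding 7 modulo 8: TOP is a 3-bit
 * field inside FSW, so (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK
 * is equivalent to subtracting one without borrowing into the neighbouring
 * FSW bits.  E.g. TOP = 0 becomes (0 + 7) & 7 = 7, wrapping around the stack.
 */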
3221
3222
3225DECLINLINE(int) iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
3226{
3227 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3228 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
3229 if (pFpuCtx->FTW & RT_BIT(iReg))
3230 return VINF_SUCCESS;
3231 return VERR_NOT_FOUND;
3232}
3233
3234
3235DECLINLINE(int) iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef) RT_NOEXCEPT
3236{
3237 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3238 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
3239 if (pFpuCtx->FTW & RT_BIT(iReg))
3240 {
3241 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
3242 return VINF_SUCCESS;
3243 }
3244 return VERR_NOT_FOUND;
3245}
3246
3247
3248DECLINLINE(int) iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
3249 uint8_t iStReg1, PCRTFLOAT80U *ppRef1) RT_NOEXCEPT
3250{
3251 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3252 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
3253 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
3254 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
3255 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
3256 {
3257 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
3258 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
3259 return VINF_SUCCESS;
3260 }
3261 return VERR_NOT_FOUND;
3262}
3263
3264
3265DECLINLINE(int) iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1) RT_NOEXCEPT
3266{
3267 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3268 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
3269 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
3270 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
3271 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
3272 {
3273 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
3274 return VINF_SUCCESS;
3275 }
3276 return VERR_NOT_FOUND;
3277}
3278
3279
3280/**
3281 * Rotates the stack registers when setting new TOS.
3282 *
3283 * @param pFpuCtx The FPU context.
3284 * @param iNewTop New TOS value.
3285 * @remarks We only do this to speed up fxsave/fxrstor which
3286 * arrange the FP registers in stack order.
3287 * MUST be done before writing the new TOS (FSW).
3288 */
3289DECLINLINE(void) iemFpuRotateStackSetTop(PX86FXSTATE pFpuCtx, uint16_t iNewTop) RT_NOEXCEPT
3290{
3291 uint16_t iOldTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
3292 RTFLOAT80U ar80Temp[8];
3293
3294 if (iOldTop == iNewTop)
3295 return;
3296
3297 /* Unscrew the stack and get it into 'native' order. */
3298 ar80Temp[0] = pFpuCtx->aRegs[(8 - iOldTop + 0) & X86_FSW_TOP_SMASK].r80;
3299 ar80Temp[1] = pFpuCtx->aRegs[(8 - iOldTop + 1) & X86_FSW_TOP_SMASK].r80;
3300 ar80Temp[2] = pFpuCtx->aRegs[(8 - iOldTop + 2) & X86_FSW_TOP_SMASK].r80;
3301 ar80Temp[3] = pFpuCtx->aRegs[(8 - iOldTop + 3) & X86_FSW_TOP_SMASK].r80;
3302 ar80Temp[4] = pFpuCtx->aRegs[(8 - iOldTop + 4) & X86_FSW_TOP_SMASK].r80;
3303 ar80Temp[5] = pFpuCtx->aRegs[(8 - iOldTop + 5) & X86_FSW_TOP_SMASK].r80;
3304 ar80Temp[6] = pFpuCtx->aRegs[(8 - iOldTop + 6) & X86_FSW_TOP_SMASK].r80;
3305 ar80Temp[7] = pFpuCtx->aRegs[(8 - iOldTop + 7) & X86_FSW_TOP_SMASK].r80;
3306
3307 /* Now rotate the stack to the new position. */
3308 pFpuCtx->aRegs[0].r80 = ar80Temp[(iNewTop + 0) & X86_FSW_TOP_SMASK];
3309 pFpuCtx->aRegs[1].r80 = ar80Temp[(iNewTop + 1) & X86_FSW_TOP_SMASK];
3310 pFpuCtx->aRegs[2].r80 = ar80Temp[(iNewTop + 2) & X86_FSW_TOP_SMASK];
3311 pFpuCtx->aRegs[3].r80 = ar80Temp[(iNewTop + 3) & X86_FSW_TOP_SMASK];
3312 pFpuCtx->aRegs[4].r80 = ar80Temp[(iNewTop + 4) & X86_FSW_TOP_SMASK];
3313 pFpuCtx->aRegs[5].r80 = ar80Temp[(iNewTop + 5) & X86_FSW_TOP_SMASK];
3314 pFpuCtx->aRegs[6].r80 = ar80Temp[(iNewTop + 6) & X86_FSW_TOP_SMASK];
3315 pFpuCtx->aRegs[7].r80 = ar80Temp[(iNewTop + 7) & X86_FSW_TOP_SMASK];
3316}
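

/*
 * To illustrate the rotation above: with iOldTop=0, aRegs[] is already in
 * physical register order (aRegs[n] == Rn).  Setting iNewTop=7 makes physical
 * R7 the new ST(0), so the old aRegs[7] value moves to aRegs[0], the old
 * aRegs[0] value to aRegs[1], and so on.
 */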
3317
3318
3319/**
3320 * Updates the FPU exception status after FCW is changed.
3321 *
3322 * @param pFpuCtx The FPU context.
3323 */
3324DECLINLINE(void) iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
3325{
3326 uint16_t u16Fsw = pFpuCtx->FSW;
3327 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
3328 u16Fsw |= X86_FSW_ES | X86_FSW_B;
3329 else
3330 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
3331 pFpuCtx->FSW = u16Fsw;
3332}
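

/*
 * For instance, if FSW.IE is set while FCW.IM is clear (a pending, unmasked
 * invalid operation exception), the function above sets FSW.ES and FSW.B;
 * once FCW is changed so that the exception is masked again, both bits are
 * cleared.
 */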
3333
3334
3335/**
3336 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
3337 *
3338 * @returns The full FTW.
3339 * @param pFpuCtx The FPU context.
3340 */
3341DECLINLINE(uint16_t) iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx) RT_NOEXCEPT
3342{
3343 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
3344 uint16_t u16Ftw = 0;
3345 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
3346 for (unsigned iSt = 0; iSt < 8; iSt++)
3347 {
3348 unsigned const iReg = (iSt + iTop) & 7;
3349 if (!(u8Ftw & RT_BIT(iReg)))
3350 u16Ftw |= 3 << (iReg * 2); /* empty */
3351 else
3352 {
3353 uint16_t uTag;
3354 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
3355 if (pr80Reg->s.uExponent == 0x7fff)
3356 uTag = 2; /* Exponent is all 1's => Special. */
3357 else if (pr80Reg->s.uExponent == 0x0000)
3358 {
3359 if (pr80Reg->s.uMantissa == 0x0000)
3360 uTag = 1; /* All bits are zero => Zero. */
3361 else
3362 uTag = 2; /* Must be special. */
3363 }
3364 else if (pr80Reg->s.uMantissa & RT_BIT_64(63)) /* The J bit. */
3365 uTag = 0; /* Valid. */
3366 else
3367 uTag = 2; /* Must be special. */
3368
3369 u16Ftw |= uTag << (iReg * 2);
3370 }
3371 }
3372
3373 return u16Ftw;
3374}
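

/*
 * The two-bit tags produced above follow the standard x87 tag word encoding:
 * 00 = valid, 01 = zero, 10 = special (NaN, infinity, denormal, unnormal)
 * and 11 = empty.
 */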
3375
3376
3377/**
3378 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
3379 *
3380 * @returns The compressed FTW.
3381 * @param u16FullFtw The full FTW to convert.
3382 */
3383DECLINLINE(uint16_t) iemFpuCompressFtw(uint16_t u16FullFtw) RT_NOEXCEPT
3384{
3385 uint8_t u8Ftw = 0;
3386 for (unsigned i = 0; i < 8; i++)
3387 {
3388 if ((u16FullFtw & 3) != 3 /*empty*/)
3389 u8Ftw |= RT_BIT(i);
3390 u16FullFtw >>= 2;
3391 }
3392
3393 return u8Ftw;
3394}
3395
3396/** @} */
3397
3398
3399/** @name Memory access.
3400 *
3401 * @{
3402 */
3403
3404
3405/**
3406 * Checks whether alignment checks are enabled or not.
3407 *
3408 * @returns true if enabled, false if not.
3409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3410 */
3411DECLINLINE(bool) iemMemAreAlignmentChecksEnabled(PVMCPUCC pVCpu) RT_NOEXCEPT
3412{
3413 AssertCompile(X86_CR0_AM == X86_EFL_AC);
3414 return IEM_GET_CPL(pVCpu) == 3
3415 && (((uint32_t)pVCpu->cpum.GstCtx.cr0 & pVCpu->cpum.GstCtx.eflags.u) & X86_CR0_AM);
3416}
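

/*
 * Note that the AssertCompile above is what makes the single AND work:
 * X86_CR0_AM and X86_EFL_AC are both bit 18, so CR0 and EFLAGS can be masked
 * together and tested once.  Alignment checks are thus only enabled when
 * CPL == 3, CR0.AM is set and EFLAGS.AC is set.
 */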
3417
3418/**
3419 * Checks if the given segment can be written to, raising the appropriate
3420 * exception if not.
3421 *
3422 * @returns VBox strict status code.
3423 *
3424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3425 * @param pHid Pointer to the hidden register.
3426 * @param iSegReg The register number.
3427 * @param pu64BaseAddr Where to return the base address to use for the
3428 * segment. (In 64-bit code it may differ from the
3429 * base in the hidden segment.)
3430 */
3431DECLINLINE(VBOXSTRICTRC) iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
3432 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
3433{
3434 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3435
3436 if (IEM_IS_64BIT_CODE(pVCpu))
3437 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
3438 else
3439 {
3440 if (!pHid->Attr.n.u1Present)
3441 {
3442 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
3443 AssertRelease(uSel == 0);
3444 LogEx(LOG_GROUP_IEM,("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
3445 return iemRaiseGeneralProtectionFault0(pVCpu);
3446 }
3447
3448 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
3449 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
3450 && !IEM_IS_64BIT_CODE(pVCpu) )
3451 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
3452 *pu64BaseAddr = pHid->u64Base;
3453 }
3454 return VINF_SUCCESS;
3455}
3456
3457
3458/**
3459 * Checks if the given segment can be read from, raising the appropriate
3460 * exception if not.
3461 *
3462 * @returns VBox strict status code.
3463 *
3464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3465 * @param pHid Pointer to the hidden register.
3466 * @param iSegReg The register number.
3467 * @param pu64BaseAddr Where to return the base address to use for the
3468 * segment. (In 64-bit code it may differ from the
3469 * base in the hidden segment.)
3470 */
3471DECLINLINE(VBOXSTRICTRC) iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
3472 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
3473{
3474 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3475
3476 if (IEM_IS_64BIT_CODE(pVCpu))
3477 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
3478 else
3479 {
3480 if (!pHid->Attr.n.u1Present)
3481 {
3482 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
3483 AssertRelease(uSel == 0);
3484 LogEx(LOG_GROUP_IEM,("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
3485 return iemRaiseGeneralProtectionFault0(pVCpu);
3486 }
3487
3488 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3489 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
3490 *pu64BaseAddr = pHid->u64Base;
3491 }
3492 return VINF_SUCCESS;
3493}
3494
3495
3496/**
3497 * Maps a physical page.
3498 *
3499 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
3500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3501 * @param GCPhysMem The physical address.
3502 * @param fAccess The intended access.
3503 * @param ppvMem Where to return the mapping address.
3504 * @param pLock The PGM lock.
3505 */
3506DECLINLINE(int) iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
3507 void **ppvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
3508{
3509#ifdef IEM_LOG_MEMORY_WRITES
3510 if (fAccess & IEM_ACCESS_TYPE_WRITE)
3511 return VERR_PGM_PHYS_TLB_CATCH_ALL;
3512#endif
3513
3514 /** @todo This API may require some improvement later. A private deal with PGM
3515 * regarding locking and unlocking needs to be struck. A couple of TLBs
3516 * living in PGM, but with publicly accessible inlined access methods
3517 * could perhaps be an even better solution. */
3518 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
3519 GCPhysMem,
3520 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
3521 RT_BOOL(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS),
3522 ppvMem,
3523 pLock);
3524 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
3525 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
3526
3527 return rc;
3528}
3529
3530
3531/**
3532 * Unmaps a page previously mapped by iemMemPageMap.
3533 *
3534 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3535 * @param GCPhysMem The physical address.
3536 * @param fAccess The intended access.
3537 * @param pvMem What iemMemPageMap returned.
3538 * @param pLock The PGM lock.
3539 */
3540DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
3541 const void *pvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
3542{
3544 NOREF(GCPhysMem);
3545 NOREF(fAccess);
3546 NOREF(pvMem);
3547 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
3548}
3549
3550#ifdef IEM_WITH_SETJMP
3551
3552/** @todo slim this down */
3553DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg,
3554 size_t cbMem, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
3555{
3556 Assert(cbMem >= 1);
3557 Assert(iSegReg < X86_SREG_COUNT);
3558
3559 /*
3560 * 64-bit mode is simpler.
3561 */
3562 if (IEM_IS_64BIT_CODE(pVCpu))
3563 {
3564 if (iSegReg >= X86_SREG_FS && iSegReg != UINT8_MAX)
3565 {
3566 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3567 PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
3568 GCPtrMem += pSel->u64Base;
3569 }
3570
3571 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
3572 return GCPtrMem;
3573 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
3574 }
3575 /*
3576 * 16-bit and 32-bit segmentation.
3577 */
3578 else if (iSegReg != UINT8_MAX)
3579 {
3580 /** @todo Does this apply to segments with 4G-1 limit? */
3581 uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
3582 if (RT_LIKELY(GCPtrLast32 >= (uint32_t)GCPtrMem))
3583 {
3584 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3585 PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
3586 switch (pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
3587 | X86_SEL_TYPE_READ | X86_SEL_TYPE_WRITE /* same as read */
3588 | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_CONF /* same as down */
3589 | X86_SEL_TYPE_CODE))
3590 {
3591 case X86DESCATTR_P: /* readonly data, expand up */
3592 case X86DESCATTR_P | X86_SEL_TYPE_WRITE: /* writable data, expand up */
3593 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ: /* code, read-only */
3594 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_CONF: /* conforming code, read-only */
3595 /* expand up */
3596 if (RT_LIKELY(GCPtrLast32 <= pSel->u32Limit))
3597 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
3598 Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x vs %#x\n",
3599 (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit));
3600 break;
3601
3602 case X86DESCATTR_P | X86_SEL_TYPE_DOWN: /* readonly data, expand down */
3603 case X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_WRITE: /* writable data, expand down */
3604 /* expand down */
3605 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
3606 && ( pSel->Attr.n.u1DefBig
3607 || GCPtrLast32 <= UINT32_C(0xffff)) ))
3608 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
3609 Log10(("iemMemApplySegmentToReadJmp: expand down out of bounds %#x..%#x vs %#x..%#x\n",
3610 (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit, pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT16_MAX));
3611 break;
3612
3613 default:
3614 Log10(("iemMemApplySegmentToReadJmp: bad selector %#x\n", pSel->Attr.u));
3615 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
3616 break;
3617 }
3618 }
3619 Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x\n", (uint32_t)GCPtrMem, GCPtrLast32));
3620 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
3621 }
3622 /*
3623 * 32-bit flat address.
3624 */
3625 else
3626 return GCPtrMem;
3627}
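

/*
 * Limit checking recap for the above: for expand-up segments the valid
 * offsets are 0 thru u32Limit, while for expand-down segments they are
 * u32Limit+1 thru 0xffff (B bit clear) or 0xffffffff (B bit set).
 */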
3628
3629
3630/** @todo slim this down */
3631DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem,
3632 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
3633{
3634 Assert(cbMem >= 1);
3635 Assert(iSegReg < X86_SREG_COUNT);
3636
3637 /*
3638 * 64-bit mode is simpler.
3639 */
3640 if (IEM_IS_64BIT_CODE(pVCpu))
3641 {
3642 if (iSegReg >= X86_SREG_FS)
3643 {
3644 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3645 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
3646 GCPtrMem += pSel->u64Base;
3647 }
3648
3649 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
3650 return GCPtrMem;
3651 }
3652 /*
3653 * 16-bit and 32-bit segmentation.
3654 */
3655 else
3656 {
3657 Assert(GCPtrMem <= UINT32_MAX);
3658 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3659 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
3660 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
3661 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
3662 if ( fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE) /* data, expand up */
3663 /** @todo explore exactly how the CS stuff works in real mode. See also
3664 * http://www.rcollins.org/Productivity/DescriptorCache.html and
3665 * http://www.rcollins.org/ddj/Aug98/Aug98.html for some insight. */
3666 || (iSegReg == X86_SREG_CS && IEM_IS_REAL_OR_V86_MODE(pVCpu)) ) /* Ignored for CS. */ /** @todo testcase! */
3667 {
3668 /* expand up */
3669 uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
3670 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
3671 && GCPtrLast32 >= (uint32_t)GCPtrMem))
3672 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
3673 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
3674 }
3675 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
3676 {
3677 /* expand down - the upper boundary is defined by the B bit, not G. */
3678 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
3679 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
3680 && (pSel->Attr.n.u1DefBig || GCPtrLast32 <= UINT32_C(0xffff))
3681 && GCPtrLast32 >= (uint32_t)GCPtrMem))
3682 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
3683 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
3684 }
3685 else
3686 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
3687 }
3688 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
3689}
3690
3691#endif /* IEM_WITH_SETJMP */
3692
3693/**
3694 * Fakes a long mode stack selector for SS = 0.
3695 *
3696 * @param pDescSs Where to return the fake stack descriptor.
3697 * @param uDpl The DPL we want.
3698 */
3699DECLINLINE(void) iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl) RT_NOEXCEPT
3700{
3701 pDescSs->Long.au64[0] = 0;
3702 pDescSs->Long.au64[1] = 0;
3703 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
3704 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
3705 pDescSs->Long.Gen.u2Dpl = uDpl;
3706 pDescSs->Long.Gen.u1Present = 1;
3707 pDescSs->Long.Gen.u1Long = 1;
3708}
3709
3710
3711/*
3712 * Unmap helpers.
3713 */
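
/*
 * These forward to the safe (out of line) workers unless the mapping was a
 * direct one (bMapInfo == 0 on the ring-3 data TLB path), in which case
 * there is nothing left to commit or unlock.
 */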
3714
3715#ifdef IEM_WITH_SETJMP
3716
3717DECL_INLINE_THROW(void) iemMemCommitAndUnmapRwJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
3718{
3719# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
3720 if (RT_LIKELY(bMapInfo == 0))
3721 return;
3722# endif
3723 iemMemCommitAndUnmapRwSafeJmp(pVCpu, pvMem, bMapInfo);
3724}
3725
3726
3727DECL_INLINE_THROW(void) iemMemCommitAndUnmapWoJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
3728{
3729# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
3730 if (RT_LIKELY(bMapInfo == 0))
3731 return;
3732# endif
3733 iemMemCommitAndUnmapWoSafeJmp(pVCpu, pvMem, bMapInfo);
3734}
3735
3736
3737DECL_INLINE_THROW(void) iemMemCommitAndUnmapRoJmp(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
3738{
3739# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
3740 if (RT_LIKELY(bMapInfo == 0))
3741 return;
3742# endif
3743 iemMemCommitAndUnmapRoSafeJmp(pVCpu, pvMem, bMapInfo);
3744}
3745
3746#endif /* IEM_WITH_SETJMP */
3747
3748
3749/*
3750 * Instantiate R/W inline templates.
3751 */
3752
3753/** @def TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK
3754 * Used to check if an unaligned access is within the page and won't
3755 * trigger an \#AC.
3756 *
3757 * This can be used to deal with misaligned accesses on platforms that are
3758 * sensitive to such, if desired.
3759 */
3760AssertCompile(X86_CR0_AM == X86_EFL_AC);
3761AssertCompile(((3U + 1U) << 16) == X86_CR0_AM);
3762#if 1
3763# define TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(a_pVCpu, a_GCPtrEff, a_TmplMemType) \
3764 ( ((a_GCPtrEff) & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(a_TmplMemType) \
3765 && !( (uint32_t)(a_pVCpu)->cpum.GstCtx.cr0 \
3766 & (a_pVCpu)->cpum.GstCtx.eflags.u \
3767 & ((IEM_GET_CPL((a_pVCpu)) + 1U) << 16) /* IEM_GET_CPL(a_pVCpu) == 3 ? X86_CR0_AM : 0 */ \
3768 & X86_CR0_AM) )
3769#else
3770# define TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(a_pVCpu, a_GCPtrEff, a_TmplMemType) 0
3771#endif
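
/*
 * The ((IEM_GET_CPL() + 1U) << 16) expression above avoids branching on the
 * privilege level: only for CPL 3 does it produce bit 18, which is exactly
 * X86_CR0_AM (and X86_EFL_AC) as the AssertCompile statements before the
 * macro verify; for CPL 0 thru 2 the final AND with X86_CR0_AM yields zero.
 */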
3772
3773#define TMPL_MEM_TYPE uint8_t
3774#define TMPL_MEM_TYPE_ALIGN 0
3775#define TMPL_MEM_TYPE_SIZE 1
3776#define TMPL_MEM_FN_SUFF U8
3777#define TMPL_MEM_FMT_TYPE "%#04x"
3778#define TMPL_MEM_FMT_DESC "byte"
3779#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
3780
3781#define TMPL_MEM_WITH_STACK
3782
3783#define TMPL_MEM_TYPE uint16_t
3784#define TMPL_MEM_TYPE_ALIGN 1
3785#define TMPL_MEM_TYPE_SIZE 2
3786#define TMPL_MEM_FN_SUFF U16
3787#define TMPL_MEM_FMT_TYPE "%#06x"
3788#define TMPL_MEM_FMT_DESC "word"
3789#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
3790
3791#define TMPL_WITH_PUSH_SREG
3792#define TMPL_MEM_TYPE uint32_t
3793#define TMPL_MEM_TYPE_ALIGN 3
3794#define TMPL_MEM_TYPE_SIZE 4
3795#define TMPL_MEM_FN_SUFF U32
3796#define TMPL_MEM_FMT_TYPE "%#010x"
3797#define TMPL_MEM_FMT_DESC "dword"
3798#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
3799#undef TMPL_WITH_PUSH_SREG
3800
3801#define TMPL_MEM_TYPE uint64_t
3802#define TMPL_MEM_TYPE_ALIGN 7
3803#define TMPL_MEM_TYPE_SIZE 8
3804#define TMPL_MEM_FN_SUFF U64
3805#define TMPL_MEM_FMT_TYPE "%#018RX64"
3806#define TMPL_MEM_FMT_DESC "qword"
3807#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
3808
3809#undef TMPL_MEM_WITH_STACK
3810
3811#define TMPL_MEM_NO_STORE
3812#define TMPL_MEM_NO_MAPPING
3813#define TMPL_MEM_TYPE uint64_t
3814#define TMPL_MEM_TYPE_ALIGN 15
3815#define TMPL_MEM_TYPE_SIZE 8
3816#define TMPL_MEM_FN_SUFF U64AlignedU128
3817#define TMPL_MEM_FMT_TYPE "%#018RX64"
3818#define TMPL_MEM_FMT_DESC "qword"
3819#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
3820
3821#undef TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK
3822
3823/** @} */
3824
3825
3826#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3827
3828/**
3829 * Gets CR0 fixed-0 bits in VMX operation.
3830 *
3831 * We do this rather than fetching what we report to the guest (in
3832 * IA32_VMX_CR0_FIXED0 MSR) because real hardware (and we do the same) reports the
3833 * same values regardless of whether the unrestricted-guest feature is available on the CPU.
3834 *
3835 * @returns CR0 fixed-0 bits.
3836 * @param pVCpu The cross context virtual CPU structure.
3837 * @param fVmxNonRootMode Whether the CR0 fixed-0 bits for VMX non-root mode
3838 * must be returned. When @c false, the CR0 fixed-0
3839 * bits for VMX root mode are returned.
3840 *
3841 */
3842DECLINLINE(uint64_t) iemVmxGetCr0Fixed0(PCVMCPUCC pVCpu, bool fVmxNonRootMode) RT_NOEXCEPT
3843{
3844 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
3845
3846 PCVMXMSRS pMsrs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs;
3847 if ( fVmxNonRootMode
3848 && (pMsrs->ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST))
3849 return VMX_V_CR0_FIXED0_UX;
3850 return VMX_V_CR0_FIXED0;
3851}
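

/*
 * In practice this means the guest's CR0.PE, CR0.NE and CR0.PG must all be
 * set, except in VMX non-root mode with unrestricted-guest execution where
 * only CR0.NE remains fixed to one.
 */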
3852
3853
3854# ifdef XAPIC_OFF_END /* Requires VBox/apic.h to be included before IEMInline.h. */
3855/**
3856 * Sets virtual-APIC write emulation as pending.
3857 *
3858 * @param pVCpu The cross context virtual CPU structure.
3859 * @param offApic The offset in the virtual-APIC page that was written.
3860 */
3861DECLINLINE(void) iemVmxVirtApicSetPendingWrite(PVMCPUCC pVCpu, uint16_t offApic) RT_NOEXCEPT
3862{
3863 Assert(offApic < XAPIC_OFF_END + 4);
3864
3865 /*
3866 * Record the currently updated APIC offset, as we need this later for figuring
3867 * out whether to perform TPR, EOI or self-IPI virtualization, as well as
3868 * for supplying the exit qualification when causing an APIC-write VM-exit.
3869 */
3870 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offApic;
3871
3872 /*
3873 * Flag that we need to perform virtual-APIC write emulation (TPR/PPR/EOI/Self-IPI
3874 * virtualization or APIC-write emulation).
3875 */
3876 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3877 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
3878}
3879# endif /* XAPIC_OFF_END */
3880
3881#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3882
3883#endif /* !VMM_INCLUDED_SRC_include_IEMInline_h */