VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInline.h@102977

Last change on this file since 102977 was 102977, checked in by vboxsync, 11 months ago

VMM/IEM: Implemented generic fallback for misaligned x86 locking that is not compatible with the host. Using the existing split-lock solution with VINF_EM_EMULATE_SPLIT_LOCK from bugref:10052. We keep ignoring the 'lock' prefix in the recompiler for single CPU VMs (now also on amd64 hosts). bugref:10547

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 146.2 KB
/* $Id: IEMInline.h 102977 2024-01-19 23:11:30Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - Inlined Functions.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#ifndef VMM_INCLUDED_SRC_include_IEMInline_h
#define VMM_INCLUDED_SRC_include_IEMInline_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif



/**
 * Makes status code adjustments (pass up from I/O and access handler)
 * as well as maintaining statistics.
 *
 * @returns Strict VBox status code to pass up.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   rcStrict    The status from executing an instruction.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
{
    if (rcStrict != VINF_SUCCESS)
    {
        /* Deal with the cases that should be treated as VINF_SUCCESS first. */
        if (   rcStrict == VINF_IEM_YIELD_PENDING_FF
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX /** @todo r=bird: Why do we need TWO status codes here? */
            || rcStrict == VINF_VMX_VMEXIT
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
            || rcStrict == VINF_SVM_VMEXIT
#endif
           )
        {
            rcStrict = pVCpu->iem.s.rcPassUp;
            if (RT_LIKELY(rcStrict == VINF_SUCCESS))
            { /* likely */ }
            else
                pVCpu->iem.s.cRetPassUpStatus++;
        }
        else if (RT_SUCCESS(rcStrict))
        {
            AssertMsg(   (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
                      || rcStrict == VINF_IOM_R3_IOPORT_READ
                      || rcStrict == VINF_IOM_R3_IOPORT_WRITE
                      || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
                      || rcStrict == VINF_IOM_R3_MMIO_READ
                      || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
                      || rcStrict == VINF_IOM_R3_MMIO_WRITE
                      || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
                      || rcStrict == VINF_CPUM_R3_MSR_READ
                      || rcStrict == VINF_CPUM_R3_MSR_WRITE
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR
                      || rcStrict == VINF_EM_RAW_TO_R3
                      || rcStrict == VINF_EM_TRIPLE_FAULT
                      || rcStrict == VINF_EM_EMULATE_SPLIT_LOCK
                      || rcStrict == VINF_GIM_R3_HYPERCALL
                      /* raw-mode / virt handlers only: */
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
                      || rcStrict == VINF_SELM_SYNC_GDT
                      || rcStrict == VINF_CSAM_PENDING_ACTION
                      || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
                      /* nested hw.virt codes: */
                      || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
                      || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
                      , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
            int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
            if (rcPassUp == VINF_SUCCESS)
                pVCpu->iem.s.cRetInfStatuses++;
            else if (   rcPassUp < VINF_EM_FIRST
                     || rcPassUp > VINF_EM_LAST
                     || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
            {
                LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
                pVCpu->iem.s.cRetPassUpStatus++;
                rcStrict = rcPassUp;
            }
            else
            {
                LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
                pVCpu->iem.s.cRetInfStatuses++;
            }
        }
        else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
            pVCpu->iem.s.cRetAspectNotImplemented++;
        else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
            pVCpu->iem.s.cRetInstrNotImplemented++;
        else
            pVCpu->iem.s.cRetErrStatuses++;
    }
    else
    {
        rcStrict = pVCpu->iem.s.rcPassUp;
        if (rcStrict != VINF_SUCCESS)
            pVCpu->iem.s.cRetPassUpStatus++;
    }

    /* Just clear it here as well. */
    pVCpu->iem.s.rcPassUp = VINF_SUCCESS;

    return rcStrict;
}
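
#if 0 /* Illustrative usage sketch -- not part of the original file.  A typical
         IEMExec* entry point runs an instruction and then filters the status
         through iemExecStatusCodeFiddling before returning to the caller.
         The worker name below is hypothetical: */
DECLINLINE(VBOXSTRICTRC) iemExecOneExample(PVMCPUCC pVCpu)
{
    VBOXSTRICTRC rcStrict = iemExecOneExampleWorker(pVCpu); /* hypothetical worker */
    return iemExecStatusCodeFiddling(pVCpu, rcStrict);
}
#endif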


/**
 * Sets the pass up status.
 *
 * @returns VINF_SUCCESS.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @param   rcPassUp    The pass up status.  Must be informational.
 *                      VINF_SUCCESS is not allowed.
 */
DECLINLINE(int) iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp) RT_NOEXCEPT
{
    AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);

    int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
    if (rcOldPassUp == VINF_SUCCESS)
        pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
    /* If both are EM scheduling codes, use EM priority rules. */
    else if (   rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
             && rcPassUp    >= VINF_EM_FIRST && rcPassUp    <= VINF_EM_LAST)
    {
        if (rcPassUp < rcOldPassUp)
        {
            LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
            pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
        }
        else
            LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
    }
    /* Override EM scheduling with specific status code. */
    else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
    {
        LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
        pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
    }
    /* Don't override specific status code, first come first served. */
    else
        LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
    return VINF_SUCCESS;
}
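
#if 0 /* Illustrative sketch -- not part of the original file.  When both the
         old and the new status are EM scheduling codes, the numerically lower
         (higher priority) one is kept; rcSchedA/rcSchedB are hypothetical
         EM status codes: */
    iemSetPassUpStatus(pVCpu, rcSchedA);
    iemSetPassUpStatus(pVCpu, rcSchedB);
    /* pVCpu->iem.s.rcPassUp now holds RT_MIN(rcSchedA, rcSchedB). */
#endif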


/**
 * Calculates the IEM_F_MODE_X86_32BIT_FLAT flag.
 *
 * Checks if CS, SS, DS and ES are all wide open flat 32-bit segments.  This
 * will reject expand down data segments and conforming code segments.
 *
 * ASSUMES that the CPU is in 32-bit mode.
 *
 * @note    Will return zero if any of the segment register state is marked
 *          external, this must be factored into assertions checking fExec
 *          consistency.
 *
 * @returns IEM_F_MODE_X86_32BIT_FLAT or zero.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @sa      iemCalc32BitFlatIndicatorEsDs
 */
DECL_FORCE_INLINE(uint32_t) iemCalc32BitFlatIndicator(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    AssertCompile(X86_SEL_TYPE_DOWN == X86_SEL_TYPE_CONF);
    return (   (  pVCpu->cpum.GstCtx.es.Attr.u
                | pVCpu->cpum.GstCtx.cs.Attr.u
                | pVCpu->cpum.GstCtx.ss.Attr.u
                | pVCpu->cpum.GstCtx.ds.Attr.u)
             & (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86DESCATTR_UNUSABLE))
        ==     (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P)
        && (   (pVCpu->cpum.GstCtx.es.u32Limit + 1)
             | (pVCpu->cpum.GstCtx.cs.u32Limit + 1)
             | (pVCpu->cpum.GstCtx.ss.u32Limit + 1)
             | (pVCpu->cpum.GstCtx.ds.u32Limit + 1))
        == 0
        && (   pVCpu->cpum.GstCtx.es.u64Base
             | pVCpu->cpum.GstCtx.cs.u64Base
             | pVCpu->cpum.GstCtx.ss.u64Base
             | pVCpu->cpum.GstCtx.ds.u64Base)
        == 0
        && !(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS))
        ? IEM_F_MODE_X86_32BIT_FLAT : 0;
}
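
/* Note (added commentary, not part of the original file): a "wide open flat"
   segment as tested above has base 0 and limit 0xffffffff, so u32Limit + 1
   wraps to zero in 32-bit arithmetic.  That is why the incremented limits are
   simply ORed together and compared against zero, and likewise for the bases. */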


/**
 * Calculates the IEM_F_MODE_X86_32BIT_FLAT flag, ASSUMING the CS and SS are
 * flat already.
 *
 * This is used by sysenter.
 *
 * @note    Will return zero if any of the segment register state is marked
 *          external, this must be factored into assertions checking fExec
 *          consistency.
 *
 * @returns IEM_F_MODE_X86_32BIT_FLAT or zero.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @sa      iemCalc32BitFlatIndicator
 */
DECL_FORCE_INLINE(uint32_t) iemCalc32BitFlatIndicatorEsDs(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    AssertCompile(X86_SEL_TYPE_DOWN == X86_SEL_TYPE_CONF);
    return (   (  pVCpu->cpum.GstCtx.es.Attr.u
                | pVCpu->cpum.GstCtx.ds.Attr.u)
             & (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86DESCATTR_UNUSABLE))
        ==     (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P)
        && (   (pVCpu->cpum.GstCtx.es.u32Limit + 1)
             | (pVCpu->cpum.GstCtx.ds.u32Limit + 1))
        == 0
        && (   pVCpu->cpum.GstCtx.es.u64Base
             | pVCpu->cpum.GstCtx.ds.u64Base)
        == 0
        && !(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS))
        ? IEM_F_MODE_X86_32BIT_FLAT : 0;
}


/**
 * Calculates the IEM_F_MODE_XXX and CPL flags.
 *
 * @returns IEM_F_MODE_XXX
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 */
DECL_FORCE_INLINE(uint32_t) iemCalcExecModeAndCplFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    /*
     * This duplicates code from CPUMGetGuestCPL and CPUMIsGuestIn64BitCodeEx
     * here to try get this done as efficiently as possible.
     */
    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);

    if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
    {
        if (!pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
        {
            Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
            uint32_t fExec = ((uint32_t)pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl << IEM_F_X86_CPL_SHIFT);
            if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig)
            {
                Assert(!pVCpu->cpum.GstCtx.cs.Attr.n.u1Long || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA));
                fExec |= IEM_F_MODE_X86_32BIT_PROT | iemCalc32BitFlatIndicator(pVCpu);
            }
            else if (   pVCpu->cpum.GstCtx.cs.Attr.n.u1Long
                     && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA))
                fExec |= IEM_F_MODE_X86_64BIT;
            else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
                fExec |= IEM_F_MODE_X86_16BIT_PROT;
            else
                fExec |= IEM_F_MODE_X86_16BIT_PROT_PRE_386;
            return fExec;
        }
        return IEM_F_MODE_X86_16BIT_PROT_V86 | (UINT32_C(3) << IEM_F_X86_CPL_SHIFT);
    }

    /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
    if (RT_LIKELY(!pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig))
    {
        if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
            return IEM_F_MODE_X86_16BIT;
        return IEM_F_MODE_X86_16BIT_PRE_386;
    }

    /* 32-bit unreal mode. */
    return IEM_F_MODE_X86_32BIT | iemCalc32BitFlatIndicator(pVCpu);
}
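
/* Example (added commentary, not part of the original file): for a ring-3
   guest in 32-bit protected mode with flat segments this returns
   IEM_F_MODE_X86_32BIT_PROT | IEM_F_MODE_X86_32BIT_FLAT
   | (3 << IEM_F_X86_CPL_SHIFT), the CPL being taken from SS.DPL. */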


/**
 * Calculates the AMD-V and VT-x related context flags.
 *
 * @returns 0 or a combination of IEM_F_X86_CTX_IN_GUEST, IEM_F_X86_CTX_SVM and
 *          IEM_F_X86_CTX_VMX.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 */
DECL_FORCE_INLINE(uint32_t) iemCalcExecHwVirtFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    /*
     * This duplicates code from CPUMIsGuestVmxEnabled, CPUMIsGuestSvmEnabled
     * and CPUMIsGuestInNestedHwvirtMode to some extent.
     */
    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);

    AssertCompile(X86_CR4_VMXE != MSR_K6_EFER_SVME);
    uint64_t const fTmp = (pVCpu->cpum.GstCtx.cr4     & X86_CR4_VMXE)
                        | (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SVME);
    if (RT_LIKELY(!fTmp))
        return 0; /* likely */

    if (fTmp & X86_CR4_VMXE)
    {
        Assert(pVCpu->cpum.GstCtx.hwvirt.enmHwvirt == CPUMHWVIRT_VMX);
        if (pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode)
            return IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST;
        return IEM_F_X86_CTX_VMX;
    }

    Assert(pVCpu->cpum.GstCtx.hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
    if (pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN)
        return IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST;
    return IEM_F_X86_CTX_SVM;
}
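
/* Example (added commentary, not part of the original file): while executing
   a VMX nested guest (VMX non-root mode) this returns
   IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST, matching the
   fInVmxNonRootMode check above. */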

#ifdef VBOX_INCLUDED_vmm_dbgf_h /* VM::dbgf.ro.cEnabledHwBreakpoints is only accessible if VBox/vmm/dbgf.h is included. */

/**
 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags.
 *
 * @returns IEM_F_BRK_PENDING_XXX or zero.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 */
DECL_FORCE_INLINE(uint32_t) iemCalcExecDbgFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);

    if (RT_LIKELY(   !(pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)
                  && pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledHwBreakpoints == 0))
        return 0;
    return iemCalcExecDbgFlagsSlow(pVCpu);
}

/**
 * Calculates the IEM_F_XXX flags.
 *
 * @returns IEM_F_XXX combination matching the current CPU state.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 */
DECL_FORCE_INLINE(uint32_t) iemCalcExecFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    return iemCalcExecModeAndCplFlags(pVCpu)
         | iemCalcExecHwVirtFlags(pVCpu)
         /* SMM is not yet implemented */
         | iemCalcExecDbgFlags(pVCpu)
         ;
}


/**
 * Re-calculates the MODE and CPL parts of IEMCPU::fExec.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 */
DECL_FORCE_INLINE(void) iemRecalcExecModeAndCplFlags(PVMCPUCC pVCpu)
{
    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
                       | iemCalcExecModeAndCplFlags(pVCpu);
}


/**
 * Re-calculates the IEM_F_PENDING_BRK_MASK part of IEMCPU::fExec.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 */
DECL_FORCE_INLINE(void) iemRecalcExecDbgFlags(PVMCPUCC pVCpu)
{
    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_PENDING_BRK_MASK)
                       | iemCalcExecDbgFlags(pVCpu);
}

#endif /* VBOX_INCLUDED_vmm_dbgf_h */


#ifndef IEM_WITH_OPAQUE_DECODER_STATE

# if defined(VBOX_INCLUDED_vmm_dbgf_h) || defined(DOXYGEN_RUNNING) /* dbgf.ro.cEnabledHwBreakpoints */

/**
 * Initializes the execution state.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @param   fExecOpts   Optional execution flags:
 *                          - IEM_F_BYPASS_HANDLERS
 *                          - IEM_F_X86_DISREGARD_LOCK
 *
 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
 *          side-effects in strict builds.
 */
DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
{
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));

    pVCpu->iem.s.rcPassUp        = VINF_SUCCESS;
    pVCpu->iem.s.fExec           = iemCalcExecFlags(pVCpu) | fExecOpts;
    pVCpu->iem.s.cActiveMappings = 0;
    pVCpu->iem.s.iNextMapping    = 0;

#  ifdef VBOX_STRICT
    pVCpu->iem.s.enmDefAddrMode  = (IEMMODE)0xfe;
    pVCpu->iem.s.enmEffAddrMode  = (IEMMODE)0xfe;
    pVCpu->iem.s.enmDefOpSize    = (IEMMODE)0xfe;
    pVCpu->iem.s.enmEffOpSize    = (IEMMODE)0xfe;
    pVCpu->iem.s.fPrefixes       = 0xfeedbeef;
    pVCpu->iem.s.uRexReg         = 127;
    pVCpu->iem.s.uRexB           = 127;
    pVCpu->iem.s.offModRm        = 127;
    pVCpu->iem.s.uRexIndex       = 127;
    pVCpu->iem.s.iEffSeg         = 127;
    pVCpu->iem.s.idxPrefix       = 127;
    pVCpu->iem.s.uVex3rdReg      = 127;
    pVCpu->iem.s.uVexLength      = 127;
    pVCpu->iem.s.fEvexStuff      = 127;
    pVCpu->iem.s.uFpuOpcode      = UINT16_MAX;
#   ifdef IEM_WITH_CODE_TLB
    pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
    pVCpu->iem.s.pbInstrBuf       = NULL;
    pVCpu->iem.s.cbInstrBuf       = UINT16_MAX;
    pVCpu->iem.s.cbInstrBufTotal  = UINT16_MAX;
    pVCpu->iem.s.offCurInstrStart = INT16_MAX;
    pVCpu->iem.s.uInstrBufPc      = UINT64_C(0xc0ffc0ffcff0c0ff);
#    ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    pVCpu->iem.s.offOpcode        = 127;
#    endif
#   else
    pVCpu->iem.s.offOpcode        = 127;
    pVCpu->iem.s.cbOpcode         = 127;
#   endif
#  endif /* VBOX_STRICT */
}
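
#if 0 /* Illustrative usage sketch -- not part of the original file.  Callers
         pair iemInitExec() with iemUninitExec() (or the combined helper
         further below) so the strict-build poison values set above never leak
         into the next run; iemWorkerExample is hypothetical: */
    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = iemWorkerExample(pVCpu);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
#endif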


#  if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
/**
 * Performs a minimal reinitialization of the execution state.
 *
 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
 * 'world-switch' type operations on the CPU.  Currently only nested
 * hardware-virtualization uses it.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr     The instruction length (for flushing).
 */
DECLINLINE(void) iemReInitExec(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
{
    pVCpu->iem.s.fExec = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
    iemOpcodeFlushHeavy(pVCpu, cbInstr);
}
#  endif

# endif /* VBOX_INCLUDED_vmm_dbgf_h || DOXYGEN_RUNNING */

/**
 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 */
DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
# ifdef VBOX_STRICT
#  ifdef IEM_WITH_CODE_TLB
    NOREF(pVCpu);
#  else
    pVCpu->iem.s.cbOpcode = 0;
#  endif
# else
    NOREF(pVCpu);
# endif
}


/**
 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
 *
 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
 *
 * @returns Fiddled strict VBox status code, ready to return to non-IEM caller.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   rcStrict    The status code to fiddle.
 */
DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
{
    iemUninitExec(pVCpu);
    return iemExecStatusCodeFiddling(pVCpu, rcStrict);
}


/**
 * Macro used by the IEMExec* methods to check the given instruction length.
 *
 * Will return on failure!
 *
 * @param   a_cbInstr   The given instruction length.
 * @param   a_cbMin     The minimum length.
 */
# define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
    AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
                    ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
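
/* Note (added commentary, not part of the original file): the single unsigned
   comparison above implements a_cbMin <= a_cbInstr <= 15.  If a_cbInstr is
   below a_cbMin the subtraction wraps to a huge unsigned value, so one compare
   covers both bounds; e.g. with a_cbMin=1 an a_cbInstr of 0 yields UINT_MAX,
   which fails the <= 14 check and the macro returns
   VERR_IEM_INVALID_INSTR_LENGTH. */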


# ifndef IEM_WITH_SETJMP

/**
 * Fetches the first opcode byte.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @param   pu8         Where to return the opcode byte.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetFirstU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
{
    /*
     * Check for hardware instruction breakpoints.
     */
    if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_INSTR)))
    { /* likely */ }
    else
    {
        VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
                                                       pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        { /* likely */ }
        else
        {
            *pu8 = 0xff; /* shut up gcc. sigh */
            if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
                return iemRaiseDebugException(pVCpu);
            return rcStrict;
        }
    }

    /*
     * Fetch the first opcode byte.
     */
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
        *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
        return VINF_SUCCESS;
    }
    return iemOpcodeGetNextU8Slow(pVCpu, pu8);
}

# else /* IEM_WITH_SETJMP */

/**
 * Fetches the first opcode byte, longjmp on error.
 *
 * @returns The opcode byte.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 */
DECL_INLINE_THROW(uint8_t) iemOpcodeGetFirstU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
    /*
     * Check for hardware instruction breakpoints.
     */
    if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_INSTR)))
    { /* likely */ }
    else
    {
        VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
                                                       pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        { /* likely */ }
        else
        {
            if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
                rcStrict = iemRaiseDebugException(pVCpu);
            IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
        }
    }

    /*
     * Fetch the first opcode byte.
     */
#  ifdef IEM_WITH_CODE_TLB
    uint8_t         bRet;
    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    if (RT_LIKELY(   pbBuf != NULL
                  && offBuf < pVCpu->iem.s.cbInstrBuf))
    {
        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
        bRet = pbBuf[offBuf];
    }
    else
        bRet = iemOpcodeGetNextU8SlowJmp(pVCpu);
#   ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    Assert(pVCpu->iem.s.offOpcode == 0);
    pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet;
#   endif
    return bRet;

#  else /* !IEM_WITH_CODE_TLB */
    uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
        return pVCpu->iem.s.abOpcode[offOpcode];
    }
    return iemOpcodeGetNextU8SlowJmp(pVCpu);
#  endif
}

# endif /* IEM_WITH_SETJMP */

/**
 * Fetches the first opcode byte, returns/throws automatically on failure.
 *
 * @param   a_pu8   Where to return the opcode byte.
 * @remark  Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_FIRST_U8(a_pu8) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetFirstU8(pVCpu, (a_pu8)); \
        if (rcStrict2 == VINF_SUCCESS) \
        { /* likely */ } \
        else \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_FIRST_U8(a_pu8) (*(a_pu8) = iemOpcodeGetFirstU8Jmp(pVCpu))
# endif /* IEM_WITH_SETJMP */
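
#if 0 /* Illustrative sketch -- not part of the original file.  Typical use at
         the top of the instruction decoder; depending on IEM_WITH_SETJMP the
         macro either returns the strict status on failure or longjmps: */
    uint8_t bOpcode;
    IEM_OPCODE_GET_FIRST_U8(&bOpcode);
#endif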


# ifndef IEM_WITH_SETJMP

/**
 * Fetches the next opcode byte.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @param   pu8         Where to return the opcode byte.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
{
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
        *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
        return VINF_SUCCESS;
    }
    return iemOpcodeGetNextU8Slow(pVCpu, pu8);
}

# else /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode byte, longjmp on error.
 *
 * @returns The opcode byte.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 */
DECL_INLINE_THROW(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
#  ifdef IEM_WITH_CODE_TLB
    uint8_t         bRet;
    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    if (RT_LIKELY(   pbBuf != NULL
                  && offBuf < pVCpu->iem.s.cbInstrBuf))
    {
        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
        bRet = pbBuf[offBuf];
    }
    else
        bRet = iemOpcodeGetNextU8SlowJmp(pVCpu);
#   ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    Assert(pVCpu->iem.s.offOpcode < sizeof(pVCpu->iem.s.abOpcode));
    pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet;
#   endif
    return bRet;

#  else /* !IEM_WITH_CODE_TLB */
    uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
        return pVCpu->iem.s.abOpcode[offOpcode];
    }
    return iemOpcodeGetNextU8SlowJmp(pVCpu);
#  endif
}

# endif /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode byte, returns automatically on failure.
 *
 * @param   a_pu8   Where to return the opcode byte.
 * @remark  Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
        if (rcStrict2 == VINF_SUCCESS) \
        { /* likely */ } \
        else \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
# endif /* IEM_WITH_SETJMP */


# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next signed byte from the opcode stream.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pi8         Where to return the signed byte.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8) RT_NOEXCEPT
{
    return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
}
# endif /* !IEM_WITH_SETJMP */


/**
 * Fetches the next signed byte from the opcode stream, returning automatically
 * on failure.
 *
 * @param   a_pi8   Where to return the signed byte.
 * @remark  Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else /* IEM_WITH_SETJMP */
#  define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))

# endif /* IEM_WITH_SETJMP */


# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next signed byte from the opcode stream, extending it to
 * unsigned 16-bit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu16        Where to return the unsigned word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);

    *pu16 = (uint16_t)(int16_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
    pVCpu->iem.s.offOpcode = offOpcode + 1;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next signed byte from the opcode stream and sign-extends it to a
 * word, returning automatically on failure.
 *
 * @param   a_pu16  Where to return the word.
 * @remark  Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (uint16_t)(int16_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
# endif

# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next signed byte from the opcode stream, extending it to
 * unsigned 32-bit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu32        Where to return the unsigned dword.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);

    *pu32 = (uint32_t)(int32_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
    pVCpu->iem.s.offOpcode = offOpcode + 1;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next signed byte from the opcode stream and sign-extends it to a
 * double word, returning automatically on failure.
 *
 * @param   a_pu32  Where to return the double word.
 * @remark  Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (uint32_t)(int32_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
# endif


# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next signed byte from the opcode stream, extending it to
 * unsigned 64-bit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu64        Where to return the unsigned qword.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);

    *pu64 = (uint64_t)(int64_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
    pVCpu->iem.s.offOpcode = offOpcode + 1;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next signed byte from the opcode stream and sign-extends it to a
 * quad word, returning automatically on failure.
 *
 * @param   a_pu64  Where to return the quad word.
 * @remark  Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
# endif
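
#if 0 /* Illustrative sketch -- not part of the original file.  The S8_SX
         variants serve instructions taking a sign-extended imm8, such as the
         0x83 group (ADD/OR/ADC/... r/m16|32|64, imm8).  A decoder fragment
         for the 64-bit operand size case would fetch the immediate so: */
    uint64_t u64Imm;
    IEM_OPCODE_GET_NEXT_S8_SX_U64(&u64Imm); /* byte 0xfe becomes 0xfffffffffffffffe */
#endif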


# ifndef IEM_WITH_SETJMP

/**
 * Fetches the next opcode word.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu16        Where to return the opcode word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
{
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
#  else
        *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
#  endif
        return VINF_SUCCESS;
    }
    return iemOpcodeGetNextU16Slow(pVCpu, pu16);
}

# else /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode word, longjmp on error.
 *
 * @returns The opcode word.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 */
DECL_INLINE_THROW(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
#  ifdef IEM_WITH_CODE_TLB
    uint16_t        u16Ret;
    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    if (RT_LIKELY(   pbBuf != NULL
                  && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
    {
        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
#   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        u16Ret = *(uint16_t const *)&pbBuf[offBuf];
#   else
        u16Ret = RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
#   endif
    }
    else
        u16Ret = iemOpcodeGetNextU16SlowJmp(pVCpu);

#   ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    Assert(offOpcode + 1 < sizeof(pVCpu->iem.s.abOpcode));
#    ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    *(uint16_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u16Ret;
#    else
    pVCpu->iem.s.abOpcode[offOpcode]     = RT_LO_U8(u16Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_HI_U8(u16Ret);
#    endif
    pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)2;
#   endif

    return u16Ret;

#  else /* !IEM_WITH_CODE_TLB */
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
#   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
#   else
        return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
#   endif
    }
    return iemOpcodeGetNextU16SlowJmp(pVCpu);
#  endif /* !IEM_WITH_CODE_TLB */
}

# endif /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode word, returns automatically on failure.
 *
 * @param   a_pu16      Where to return the opcode word.
 * @remark  Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
# endif

# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next opcode word, zero extending it to a double word.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu32        Where to return the opcode double word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);

    *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    pVCpu->iem.s.offOpcode = offOpcode + 2;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next opcode word and zero extends it to a double word, returns
 * automatically on failure.
 *
 * @param   a_pu32      Where to return the opcode double word.
 * @remark  Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
# endif

# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next opcode word, zero extending it to a quad word.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu64        Where to return the opcode quad word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);

    *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    pVCpu->iem.s.offOpcode = offOpcode + 2;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next opcode word and zero extends it to a quad word, returns
 * automatically on failure.
 *
 * @param   a_pu64      Where to return the opcode quad word.
 * @remark  Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
# endif


# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next signed word from the opcode stream.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pi16        Where to return the signed word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16) RT_NOEXCEPT
{
    return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
}
# endif /* !IEM_WITH_SETJMP */


/**
 * Fetches the next signed word from the opcode stream, returning automatically
 * on failure.
 *
 * @param   a_pi16      Where to return the signed word.
 * @remark  Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
# endif

# ifndef IEM_WITH_SETJMP

/**
 * Fetches the next opcode dword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu32        Where to return the opcode double word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
{
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
#  else
        *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                    pVCpu->iem.s.abOpcode[offOpcode + 1],
                                    pVCpu->iem.s.abOpcode[offOpcode + 2],
                                    pVCpu->iem.s.abOpcode[offOpcode + 3]);
#  endif
        return VINF_SUCCESS;
    }
    return iemOpcodeGetNextU32Slow(pVCpu, pu32);
}

# else /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode dword, longjmp on error.
 *
 * @returns The opcode dword.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 */
DECL_INLINE_THROW(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
#  ifdef IEM_WITH_CODE_TLB
    uint32_t        u32Ret;
    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    if (RT_LIKELY(   pbBuf != NULL
                  && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
    {
        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
#   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        u32Ret = *(uint32_t const *)&pbBuf[offBuf];
#   else
        u32Ret = RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
                                     pbBuf[offBuf + 1],
                                     pbBuf[offBuf + 2],
                                     pbBuf[offBuf + 3]);
#   endif
    }
    else
        u32Ret = iemOpcodeGetNextU32SlowJmp(pVCpu);

#   ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    Assert(offOpcode + 3 < sizeof(pVCpu->iem.s.abOpcode));
#    ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    *(uint32_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u32Ret;
#    else
    pVCpu->iem.s.abOpcode[offOpcode]     = RT_BYTE1(u32Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_BYTE2(u32Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 2] = RT_BYTE3(u32Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 3] = RT_BYTE4(u32Ret);
#    endif
    pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)4;
#   endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */

    return u32Ret;

#  else /* !IEM_WITH_CODE_TLB */
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
#   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
#   else
        return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                   pVCpu->iem.s.abOpcode[offOpcode + 1],
                                   pVCpu->iem.s.abOpcode[offOpcode + 2],
                                   pVCpu->iem.s.abOpcode[offOpcode + 3]);
#   endif
    }
    return iemOpcodeGetNextU32SlowJmp(pVCpu);
#  endif
}

# endif /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode dword, returns automatically on failure.
 *
 * @param   a_pu32      Where to return the opcode dword.
 * @remark  Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
# endif

# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next opcode dword, zero extending it to a quad word.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu64        Where to return the opcode quad word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);

    *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                pVCpu->iem.s.abOpcode[offOpcode + 1],
                                pVCpu->iem.s.abOpcode[offOpcode + 2],
                                pVCpu->iem.s.abOpcode[offOpcode + 3]);
    pVCpu->iem.s.offOpcode = offOpcode + 4;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next opcode dword and zero extends it to a quad word, returns
 * automatically on failure.
 *
 * @param   a_pu64      Where to return the opcode quad word.
 * @remark  Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
# endif


# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next signed double word from the opcode stream.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pi32        Where to return the signed double word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32) RT_NOEXCEPT
{
    return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
}
# endif

/**
 * Fetches the next signed double word from the opcode stream, returning
 * automatically on failure.
 *
 * @param   a_pi32      Where to return the signed double word.
 * @remark  Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
# endif

# ifndef IEM_WITH_SETJMP
/**
 * Fetches the next opcode dword, sign extending it into a quad word.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu64        Where to return the opcode quad word.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
        return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);

    int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                      pVCpu->iem.s.abOpcode[offOpcode + 1],
                                      pVCpu->iem.s.abOpcode[offOpcode + 2],
                                      pVCpu->iem.s.abOpcode[offOpcode + 3]);
    *pu64 = (uint64_t)(int64_t)i32;
    pVCpu->iem.s.offOpcode = offOpcode + 4;
    return VINF_SUCCESS;
}
# endif /* !IEM_WITH_SETJMP */

/**
 * Fetches the next opcode double word and sign extends it to a quad word,
 * returns automatically on failure.
 *
 * @param   a_pu64      Where to return the opcode quad word.
 * @remark  Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
# endif
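
#if 0 /* Illustrative sketch -- not part of the original file.  Sign-extended
         imm32 is how most 64-bit instructions encode a full-width immediate
         (e.g. the imm32 forms of MOV and ADD on r/m64), so a decoder fetches
         it like this: */
    uint64_t u64Imm;
    IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); /* 0x80000000 becomes 0xffffffff80000000 */
#endif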

# ifndef IEM_WITH_SETJMP

/**
 * Fetches the next opcode qword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu64        Where to return the opcode qword.
 */
DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
{
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
    {
#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
#  else
        *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                    pVCpu->iem.s.abOpcode[offOpcode + 1],
                                    pVCpu->iem.s.abOpcode[offOpcode + 2],
                                    pVCpu->iem.s.abOpcode[offOpcode + 3],
                                    pVCpu->iem.s.abOpcode[offOpcode + 4],
                                    pVCpu->iem.s.abOpcode[offOpcode + 5],
                                    pVCpu->iem.s.abOpcode[offOpcode + 6],
                                    pVCpu->iem.s.abOpcode[offOpcode + 7]);
#  endif
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
        return VINF_SUCCESS;
    }
    return iemOpcodeGetNextU64Slow(pVCpu, pu64);
}

# else /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode qword, longjmp on error.
 *
 * @returns The opcode qword.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 */
DECL_INLINE_THROW(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
#  ifdef IEM_WITH_CODE_TLB
    uint64_t        u64Ret;
    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    if (RT_LIKELY(   pbBuf != NULL
                  && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
    {
        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
#   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        u64Ret = *(uint64_t const *)&pbBuf[offBuf];
#   else
        u64Ret = RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
                                     pbBuf[offBuf + 1],
                                     pbBuf[offBuf + 2],
                                     pbBuf[offBuf + 3],
                                     pbBuf[offBuf + 4],
                                     pbBuf[offBuf + 5],
                                     pbBuf[offBuf + 6],
                                     pbBuf[offBuf + 7]);
#   endif
    }
    else
        u64Ret = iemOpcodeGetNextU64SlowJmp(pVCpu);

#   ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    Assert(offOpcode + 7 < sizeof(pVCpu->iem.s.abOpcode));
#    ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    *(uint64_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u64Ret;
#    else
    pVCpu->iem.s.abOpcode[offOpcode]     = RT_BYTE1(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_BYTE2(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 2] = RT_BYTE3(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 3] = RT_BYTE4(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 4] = RT_BYTE5(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 5] = RT_BYTE6(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 6] = RT_BYTE7(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 7] = RT_BYTE8(u64Ret);
#    endif
    pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)8;
#   endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */

    return u64Ret;

#  else /* !IEM_WITH_CODE_TLB */
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
#   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
#   else
        return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                   pVCpu->iem.s.abOpcode[offOpcode + 1],
                                   pVCpu->iem.s.abOpcode[offOpcode + 2],
                                   pVCpu->iem.s.abOpcode[offOpcode + 3],
                                   pVCpu->iem.s.abOpcode[offOpcode + 4],
                                   pVCpu->iem.s.abOpcode[offOpcode + 5],
                                   pVCpu->iem.s.abOpcode[offOpcode + 6],
                                   pVCpu->iem.s.abOpcode[offOpcode + 7]);
#   endif
    }
    return iemOpcodeGetNextU64SlowJmp(pVCpu);
#  endif /* !IEM_WITH_CODE_TLB */
}

# endif /* IEM_WITH_SETJMP */

/**
 * Fetches the next opcode quad word, returns automatically on failure.
 *
 * @param   a_pu64      Where to return the opcode quad word.
 * @remark  Implicitly references pVCpu.
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)
# else
#  define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
# endif

/**
 * For fetching the opcode bytes for a ModR/M effective address, but throw
 * away the result.
 *
 * This is used when decoding undefined opcodes and such where we want to avoid
 * unnecessary MC blocks.
 *
 * @note The recompiler code overrides this one so iemOpHlpCalcRmEffAddrJmpEx is
 *       used instead.  At least for now...
 */
# ifndef IEM_WITH_SETJMP
#  define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
        RTGCPTR      GCPtrEff; \
        VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff); \
        if (rcStrict != VINF_SUCCESS) \
            return rcStrict; \
    } while (0)
# else
#  define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
        (void)iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 0); \
    } while (0)
# endif

#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */


/** @name  Misc Worker Functions.
 * @{
 */

/**
 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
 * not (kind of obsolete now).
 *
 * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
 */
#define IEMMISC_GET_EFL(a_pVCpu)            ( (a_pVCpu)->cpum.GstCtx.eflags.u )

/**
 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
 *
 * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
 * @param   a_fEfl  The new EFLAGS.
 */
#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl)    do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)


/**
 * Loads a NULL data selector into a selector register, both the hidden and
 * visible parts, in protected mode.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pSReg       Pointer to the segment register.
 * @param   uRpl        The RPL.
 */
DECLINLINE(void) iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl) RT_NOEXCEPT
{
    /** @todo Testcase: write a testcase checking what happens when loading a NULL
     *        data selector in protected mode. */
    pSReg->Sel      = uRpl;
    pSReg->ValidSel = uRpl;
    pSReg->fFlags   = CPUMSELREG_FLAGS_VALID;
    if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
    {
        /* VT-x (Intel 3960x) observed doing something like this. */
        pSReg->Attr.u   = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (IEM_GET_CPL(pVCpu) << X86DESCATTR_DPL_SHIFT);
        pSReg->u32Limit = UINT32_MAX;
        pSReg->u64Base  = 0;
    }
    else
    {
        pSReg->Attr.u   = X86DESCATTR_UNUSABLE;
        pSReg->u32Limit = 0;
        pSReg->u64Base  = 0;
    }
}

/** @} */


/*
 *
 * Helper routines.
 * Helper routines.
 * Helper routines.
 *
 */

#ifndef IEM_WITH_OPAQUE_DECODER_STATE

/**
 * Recalculates the effective operand size.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 */
DECLINLINE(void) iemRecalEffOpSize(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    switch (IEM_GET_CPU_MODE(pVCpu))
    {
        case IEMMODE_16BIT:
            pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
            break;
        case IEMMODE_32BIT:
            pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
            break;
        case IEMMODE_64BIT:
            switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
            {
                case 0:
                    pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
                    break;
                case IEM_OP_PRF_SIZE_OP:
                    pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
                    break;
                case IEM_OP_PRF_SIZE_REX_W:
                case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
                    pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
                    break;
            }
            break;
        default:
            AssertFailed();
    }
}
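
/* Note (added commentary, not part of the original file): in 16-bit and
   32-bit modes the operand size prefix (0x66) simply toggles between 16- and
   32-bit operand size.  In 64-bit mode REX.W forces 64-bit and wins over 0x66
   (see the combined case above), 0x66 alone selects 16-bit, and with no
   prefix the default operand size applies. */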


/**
 * Sets the default operand size to 64-bit and recalculates the effective
 * operand size.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 */
DECLINLINE(void) iemRecalEffOpSize64Default(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    Assert(IEM_IS_64BIT_CODE(pVCpu));
    pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
    if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
        pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
    else
        pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
}


/**
 * Sets the default operand size to 64-bit and recalculates the effective
 * operand size, with Intel ignoring any operand size prefix (AMD respects it).
 *
 * This is for the relative jumps.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 */
DECLINLINE(void) iemRecalEffOpSize64DefaultAndIntelIgnoresOpSizePrefix(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    Assert(IEM_IS_64BIT_CODE(pVCpu));
    pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
    if (   (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP
        || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
        pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
    else
        pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
}
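
#if 0 /* Illustrative sketch -- not part of the original file.  For relative
         jumps in 64-bit code Intel CPUs ignore the operand size prefix while
         AMD honours it, which is what the vendor check above models; a
         decoder for such an opcode would simply do: */
    iemRecalEffOpSize64DefaultAndIntelIgnoresOpSizePrefix(pVCpu);
    /* On Intel, enmEffOpSize is now IEMMODE_64BIT even if IEM_OP_PRF_SIZE_OP was seen. */
#endif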
1641
1642#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */
1643
1644
1645
1646/** @name Register Access.
1647 * @{
1648 */
1649
1650/**
1651 * Gets a reference (pointer) to the specified hidden segment register.
1652 *
1653 * @returns Hidden register reference.
1654 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1655 * @param iSegReg The segment register.
1656 */
1657DECL_FORCE_INLINE(PCPUMSELREG) iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1658{
1659 Assert(iSegReg < X86_SREG_COUNT);
1660 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1661 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1662
1663 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
1664 return pSReg;
1665}
1666
1667
1668/**
1669 * Ensures that the given hidden segment register is up to date.
1670 *
1671 * @returns Hidden register reference.
1672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1673 * @param pSReg The segment register.
1674 */
1675DECL_FORCE_INLINE(PCPUMSELREG) iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg) RT_NOEXCEPT
1676{
1677 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
1678 NOREF(pVCpu);
1679 return pSReg;
1680}
1681
1682
1683/**
1684 * Gets a reference (pointer) to the specified segment register (the selector
1685 * value).
1686 *
1687 * @returns Pointer to the selector variable.
1688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1689 * @param iSegReg The segment register.
1690 */
1691DECL_FORCE_INLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1692{
1693 Assert(iSegReg < X86_SREG_COUNT);
1694 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1695 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
1696}
1697
1698
1699/**
1700 * Fetches the selector value of a segment register.
1701 *
1702 * @returns The selector value.
1703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1704 * @param iSegReg The segment register.
1705 */
1706DECL_FORCE_INLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1707{
1708 Assert(iSegReg < X86_SREG_COUNT);
1709 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1710 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
1711}
1712
1713
1714/**
1715 * Fetches the base address value of a segment register.
1716 *
1717 * @returns The base address value.
1718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1719 * @param iSegReg The segment register.
1720 */
1721DECL_FORCE_INLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1722{
1723 Assert(iSegReg < X86_SREG_COUNT);
1724 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1725 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
1726}
1727
1728
1729/**
1730 * Gets a reference (pointer) to the specified general purpose register.
1731 *
1732 * @returns Register reference.
1733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1734 * @param iReg The general purpose register.
1735 */
1736DECL_FORCE_INLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1737{
1738 Assert(iReg < 16);
1739 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
1740}
1741
1742
1743#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1744/**
1745 * Gets a reference (pointer) to the specified 8-bit general purpose register.
1746 *
1747 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
1748 *
1749 * @returns Register reference.
1750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1751 * @param iReg The register.
1752 */
1753DECL_FORCE_INLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1754{
1755 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
1756 {
1757 Assert(iReg < 16);
1758 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
1759 }
1760 /* high 8-bit register. */
1761 Assert(iReg < 8);
1762 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
1763}
1764#endif
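

/* Illustrative sketch (compiled out): the AH/SPL ambiguity iemGRegRefU8
   resolves. Encoding 4 means SPL when any REX prefix was seen and AH
   otherwise; X86_GREG_xSP is the usual x86.h register index (4). */
#if 0
    uint8_t *pbReg = iemGRegRefU8(pVCpu, X86_GREG_xSP);
    /* With IEM_OP_PRF_REX set: &aGRegs[4].u8, i.e. SPL.
       Without it: &aGRegs[4 & 3].bHi, i.e. AH (bits 15:8 of RAX). */
#endif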
1765
1766
1767/**
1768 * Gets a reference (pointer) to the specified 8-bit general purpose register,
1769 * alternative version with extended (20) register index.
1770 *
1771 * @returns Register reference.
1772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1773 * @param iRegEx The register. The first 16 are regular ones,
1774 * whereas 16 thru 19 map to AH, CH, DH and BH.
1775 */
1776DECL_FORCE_INLINE(uint8_t *) iemGRegRefU8Ex(PVMCPUCC pVCpu, uint8_t iRegEx) RT_NOEXCEPT
1777{
1778 /** @todo This could be done by double indexing on little endian hosts:
1779 * return &pVCpu->cpum.GstCtx.aGRegs[iRegEx & 15].ab[iRegEx >> 4]; */
1780 if (iRegEx < 16)
1781 return &pVCpu->cpum.GstCtx.aGRegs[iRegEx].u8;
1782
1783 /* high 8-bit register. */
1784 Assert(iRegEx < 20);
1785 return &pVCpu->cpum.GstCtx.aGRegs[iRegEx & 3].bHi;
1786}
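

/* Illustrative sketch (compiled out): the extended index space used by
   iemGRegRefU8Ex, where the decoder has already folded the REX decision
   into the index: 0..15 are AL..R15L, 16..19 alias AH, CH, DH and BH. */
#if 0
    uint8_t const bAl = *iemGRegRefU8Ex(pVCpu, 0);  /* aGRegs[0].u8  - AL */
    uint8_t const bBh = *iemGRegRefU8Ex(pVCpu, 19); /* aGRegs[3].bHi - BH */
#endif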
1787
1788
1789/**
1790 * Gets a reference (pointer) to the specified 16-bit general purpose register.
1791 *
1792 * @returns Register reference.
1793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1794 * @param iReg The register.
1795 */
1796DECL_FORCE_INLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1797{
1798 Assert(iReg < 16);
1799 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
1800}
1801
1802
1803/**
1804 * Gets a reference (pointer) to the specified 32-bit general purpose register.
1805 *
1806 * @returns Register reference.
1807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1808 * @param iReg The register.
1809 */
1810DECL_FORCE_INLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1811{
1812 Assert(iReg < 16);
1813 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1814}
1815
1816
1817/**
1818 * Gets a reference (pointer) to the specified signed 32-bit general purpose register.
1819 *
1820 * @returns Register reference.
1821 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1822 * @param iReg The register.
1823 */
1824DECL_FORCE_INLINE(int32_t *) iemGRegRefI32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1825{
1826 Assert(iReg < 16);
1827 return (int32_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1828}
1829
1830
1831/**
1832 * Gets a reference (pointer) to the specified 64-bit general purpose register.
1833 *
1834 * @returns Register reference.
1835 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1836 * @param iReg The register.
1837 */
1838DECL_FORCE_INLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1839{
1840 Assert(iReg < 16);
1841 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1842}
1843
1844
1845/**
1846 * Gets a reference (pointer) to the specified signed 64-bit general purpose register.
1847 *
1848 * @returns Register reference.
1849 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1850 * @param iReg The register.
1851 */
1852DECL_FORCE_INLINE(int64_t *) iemGRegRefI64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1853{
1854 Assert(iReg < 16);
1855 return (int64_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1856}
1857
1858
1859/**
1860 * Gets a reference (pointer) to the specified segment register's base address.
1861 *
1862 * @returns Segment register base address reference.
1863 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1864 * @param iSegReg The segment selector.
1865 */
1866DECL_FORCE_INLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1867{
1868 Assert(iSegReg < X86_SREG_COUNT);
1869 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1870 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
1871}
1872
1873
1874#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1875/**
1876 * Fetches the value of an 8-bit general purpose register.
1877 *
1878 * @returns The register value.
1879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1880 * @param iReg The register.
1881 */
1882DECL_FORCE_INLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1883{
1884 return *iemGRegRefU8(pVCpu, iReg);
1885}
1886#endif
1887
1888
1889/**
1890 * Fetches the value of an 8-bit general purpose register, alternative version
1891 * with extended (20) register index.
1892 *
1893 * @returns The register value.
1894 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1895 * @param iRegEx The register. The first 16 are regular ones,
1896 * whereas 16 thru 19 map to AH, CH, DH and BH.
1897 */
1898DECL_FORCE_INLINE(uint8_t) iemGRegFetchU8Ex(PVMCPUCC pVCpu, uint8_t iRegEx) RT_NOEXCEPT
1899{
1900 return *iemGRegRefU8Ex(pVCpu, iRegEx);
1901}
1902
1903
1904/**
1905 * Fetches the value of a 16-bit general purpose register.
1906 *
1907 * @returns The register value.
1908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1909 * @param iReg The register.
1910 */
1911DECL_FORCE_INLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1912{
1913 Assert(iReg < 16);
1914 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
1915}
1916
1917
1918/**
1919 * Fetches the value of a 32-bit general purpose register.
1920 *
1921 * @returns The register value.
1922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1923 * @param iReg The register.
1924 */
1925DECL_FORCE_INLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1926{
1927 Assert(iReg < 16);
1928 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1929}
1930
1931
1932/**
1933 * Fetches the value of a 64-bit general purpose register.
1934 *
1935 * @returns The register value.
1936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1937 * @param iReg The register.
1938 */
1939DECL_FORCE_INLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1940{
1941 Assert(iReg < 16);
1942 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1943}
1944
1945
1946/**
1947 * Stores a 16-bit value to a general purpose register.
1948 *
1949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1950 * @param iReg The register.
1951 * @param uValue The value to store.
1952 */
1953DECL_FORCE_INLINE(void) iemGRegStoreU16(PVMCPUCC pVCpu, uint8_t iReg, uint16_t uValue) RT_NOEXCEPT
1954{
1955 Assert(iReg < 16);
1956 pVCpu->cpum.GstCtx.aGRegs[iReg].u16 = uValue;
1957}
1958
1959
1960/**
1961 * Stores a 32-bit value to a general purpose register, implicitly clearing high
1962 * values.
1963 *
1964 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1965 * @param iReg The register.
1966 * @param uValue The value to store.
1967 */
1968DECL_FORCE_INLINE(void) iemGRegStoreU32(PVMCPUCC pVCpu, uint8_t iReg, uint32_t uValue) RT_NOEXCEPT
1969{
1970 Assert(iReg < 16);
1971 pVCpu->cpum.GstCtx.aGRegs[iReg].u64 = uValue;
1972}
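

/* Illustrative sketch (compiled out): the x86-64 zero-extension rule that
   iemGRegStoreU32 encodes by writing the full .u64 member, in contrast to
   the 16-bit store which merges. X86_GREG_xAX is the usual x86.h index. */
#if 0
    iemGRegStoreU64(pVCpu, X86_GREG_xAX, UINT64_C(0xdeadbeefcafebabe));
    iemGRegStoreU32(pVCpu, X86_GREG_xAX, UINT32_C(0x11223344));
    Assert(iemGRegFetchU64(pVCpu, X86_GREG_xAX) == UINT64_C(0x0000000011223344));
    iemGRegStoreU16(pVCpu, X86_GREG_xAX, UINT16_C(0x5566));
    Assert(iemGRegFetchU64(pVCpu, X86_GREG_xAX) == UINT64_C(0x0000000011225566));
#endif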
1973
1974
1975/**
1976 * Stores a 64-bit value to a general purpose register.
1977 *
1978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1979 * @param iReg The register.
1980 * @param uValue The value to store.
1981 */
1982DECL_FORCE_INLINE(void) iemGRegStoreU64(PVMCPUCC pVCpu, uint8_t iReg, uint64_t uValue) RT_NOEXCEPT
1983{
1984 Assert(iReg < 16);
1985 pVCpu->cpum.GstCtx.aGRegs[iReg].u64 = uValue;
1986}
1987
1988
1989/**
1990 * Get the address of the top of the stack.
1991 *
1992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1993 */
1994DECL_FORCE_INLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu) RT_NOEXCEPT
1995{
1996 if (IEM_IS_64BIT_CODE(pVCpu))
1997 return pVCpu->cpum.GstCtx.rsp;
1998 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1999 return pVCpu->cpum.GstCtx.esp;
2000 return pVCpu->cpum.GstCtx.sp;
2001}
2002
2003
2004/**
2005 * Updates the RIP/EIP/IP to point to the next instruction.
2006 *
2007 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2008 * @param cbInstr The number of bytes to add.
2009 */
2010DECL_FORCE_INLINE(void) iemRegAddToRip(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2011{
2012 /*
2013 * Advance RIP.
2014 *
2015 * When we're targeting 8086/8, 80186/8 or 80286 mode the updates are 16-bit,
2016 * while in all other modes except LM64 the updates are 32-bit. This means
2017 * we need to watch for both 32-bit and 16-bit "carry" situations, i.e.
2018 * 4GB and 64KB rollovers, and decide whether anything needs masking.
2019 *
2020 * See PC wrap around tests in bs3-cpu-weird-1.
2021 */
2022 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
2023 uint64_t const uRipNext = uRipPrev + cbInstr;
2024 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & (RT_BIT_64(32) | RT_BIT_64(16)))
2025 || IEM_IS_64BIT_CODE(pVCpu)))
2026 pVCpu->cpum.GstCtx.rip = uRipNext;
2027 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
2028 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
2029 else
2030 pVCpu->cpum.GstCtx.rip = (uint16_t)uRipNext;
2031}
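

/* Illustrative sketch (compiled out): the 16-bit rollover path above, i.e.
   an instruction straddling the top of the 64K IP space on a pre-386 target
   CPU. Values are made up for illustration. */
#if 0
    pVCpu->cpum.GstCtx.rip = UINT64_C(0xffff);  /* assuming a pre-386 target */
    iemRegAddToRip(pVCpu, 2);                   /* 0x10001 masked to 16 bits */
    Assert(pVCpu->cpum.GstCtx.rip == 1);
#endif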
2032
2033
2034/**
2035 * Called by iemRegAddToRipAndFinishingClearingRF and others when any of the
2036 * following EFLAGS bits are set:
2037 * - X86_EFL_RF - clear it.
2038 * - CPUMCTX_INHIBIT_SHADOW (_SS/_STI) - clear them.
2039 * - X86_EFL_TF - generate single step \#DB trap.
2040 * - CPUMCTX_DBG_HIT_DR0/1/2/3 - generate \#DB trap (data or I/O, not
2041 * instruction).
2042 *
2043 * According to @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events},
2044 * a \#DB due to TF (single stepping) or a DRx non-instruction breakpoint
2045 * takes priority over both NMIs and hardware interrupts. So, neither is
2046 * considered here. (The RESET, \#MC, SMI, INIT, STOPCLK and FLUSH events are
2047 * either unsupported or will be triggered on top of any \#DB raised here.)
2048 *
2049 * The RF flag only needs to be cleared here as it only suppresses instruction
2050 * breakpoints which are not raised here (happens synchronously during
2051 * instruction fetching).
2052 *
2053 * The CPUMCTX_INHIBIT_SHADOW_SS flag will be cleared by this function, so its
2054 * status has no bearing on whether \#DB exceptions are raised.
2055 *
2056 * @note This must *NOT* be called by the two instructions setting the
2057 * CPUMCTX_INHIBIT_SHADOW_SS flag.
2058 *
2059 * @see @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events}
2060 * @see @sdmv3{077,200,6.8.3,Masking Exceptions and Interrupts When Switching
2061 * Stacks}
2062 */
2063static VBOXSTRICTRC iemFinishInstructionWithFlagsSet(PVMCPUCC pVCpu, int rcNormal) RT_NOEXCEPT
2064{
2065 /*
2066 * Normally we're just here to clear RF and/or interrupt shadow bits.
2067 */
2068 if (RT_LIKELY((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) == 0))
2069 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
2070 else
2071 {
2072 /*
2073 * Raise a #DB or/and DBGF event.
2074 */
2075 VBOXSTRICTRC rcStrict;
2076 if (pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK))
2077 {
2078 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2079 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
2080 if (pVCpu->cpum.GstCtx.eflags.uBoth & X86_EFL_TF)
2081 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS;
2082 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK) >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2083 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64\n",
2084 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
2085 pVCpu->cpum.GstCtx.rflags.uBoth));
2086
2087 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK);
2088 rcStrict = iemRaiseDebugException(pVCpu);
2089
2090 /* A DBGF event/breakpoint trumps the iemRaiseDebugException informational status code. */
2091 if ((pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK) && RT_SUCCESS(rcStrict))
2092 {
2093 rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
2094 LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
2095 }
2096 }
2097 else
2098 {
2099 Assert(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK);
2100 rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
2101 LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
2102 }
2103 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_DBG_DBGF_MASK;
2104 Assert(rcStrict != VINF_SUCCESS);
2105 return rcStrict;
2106 }
2107 return rcNormal;
2108}
2109
2110
2111/**
2112 * Clears the RF and CPUMCTX_INHIBIT_SHADOW, triggering \#DB if pending.
2113 *
2114 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2115 * @param rcNormal VINF_SUCCESS to continue TB.
2116 * VINF_IEM_REEXEC_BREAK to force TB exit when
2117 * taking the wrong conditional branch.
2118 */
2119DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegFinishClearingRF(PVMCPUCC pVCpu, int rcNormal) RT_NOEXCEPT
2120{
2121 /*
2122 * We assume that most of the time nothing actually needs doing here.
2123 */
2124 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
2125 if (RT_LIKELY(!( pVCpu->cpum.GstCtx.eflags.uBoth
2126 & (X86_EFL_TF | X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) ))
2127 return rcNormal;
2128 return iemFinishInstructionWithFlagsSet(pVCpu, rcNormal);
2129}
2130
2131
2132/**
2133 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF
2134 * and CPUMCTX_INHIBIT_SHADOW.
2135 *
2136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2137 * @param cbInstr The number of bytes to add.
2138 */
2139DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2140{
2141 iemRegAddToRip(pVCpu, cbInstr);
2142 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2143}
2144
2145
2146/**
2147 * Updates the RIP to point to the next instruction and clears EFLAGS.RF
2148 * and CPUMCTX_INHIBIT_SHADOW.
2149 *
2150 * Only called from 64-bit code.
2151 *
2152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2153 * @param cbInstr The number of bytes to add.
2154 * @param rcNormal VINF_SUCCESS to continue TB.
2155 * VINF_IEM_REEXEC_BREAK to force TB exit when
2156 * taking the wrong conditional branch.
2157 */
2158DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRip64AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
2159{
2160 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rip + cbInstr;
2161 return iemRegFinishClearingRF(pVCpu, rcNormal);
2162}
2163
2164
2165/**
2166 * Updates the EIP to point to the next instruction and clears EFLAGS.RF and
2167 * CPUMCTX_INHIBIT_SHADOW.
2168 *
2169 * This is never called from 64-bit code.
2170 *
2171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2172 * @param cbInstr The number of bytes to add.
2173 * @param rcNormal VINF_SUCCESS to continue TB.
2174 * VINF_IEM_REEXEC_BREAK to force TB exit when
2175 * taking the wrong conditional branch.
2176 */
2177DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToEip32AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
2178{
2179 pVCpu->cpum.GstCtx.rip = (uint32_t)(pVCpu->cpum.GstCtx.eip + cbInstr);
2180 return iemRegFinishClearingRF(pVCpu, rcNormal);
2181}
2182
2183
2184/**
2185 * Updates the IP to point to the next instruction and clears EFLAGS.RF and
2186 * CPUMCTX_INHIBIT_SHADOW.
2187 *
2188 * This is only ever used from 16-bit code on a pre-386 CPU.
2189 *
2190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2191 * @param cbInstr The number of bytes to add.
2192 * @param rcNormal VINF_SUCCESS to continue TB.
2193 * VINF_IEM_REEXEC_BREAK to force TB exit when
2194 * taking the wrong conditional branch.
2195 */
2196DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToIp16AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
2197{
2198 pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr);
2199 return iemRegFinishClearingRF(pVCpu, rcNormal);
2200}
2201
2202
2203/**
2204 * Tail method for a finish function that doesn't clear flags or raise \#DB.
2205 *
2206 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2207 * @param rcNormal VINF_SUCCESS to continue TB.
2208 * VINF_IEM_REEXEC_BREAK to force TB exit when
2209 * taking the wrong conditional branch.
2210 */
2211DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegFinishNoFlags(PVMCPUCC pVCpu, int rcNormal) RT_NOEXCEPT
2212{
2213 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
2214 Assert(!( pVCpu->cpum.GstCtx.eflags.uBoth
2215 & (X86_EFL_TF | X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) );
2216 RT_NOREF(pVCpu);
2217 return rcNormal;
2218}
2219
2220
2221/**
2222 * Updates the RIP to point to the next instruction, but does not need to clear
2223 * EFLAGS.RF or CPUMCTX_INHIBIT_SHADOW nor check for debug flags.
2224 *
2225 * Only called from 64-bit code.
2226 *
2227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2228 * @param cbInstr The number of bytes to add.
2229 * @param rcNormal VINF_SUCCESS to continue TB.
2230 * VINF_IEM_REEXEC_BREAK to force TB exit when
2231 * taking the wrong conditional branch.
2232 */
2233DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRip64AndFinishingNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
2234{
2235 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rip + cbInstr;
2236 return iemRegFinishNoFlags(pVCpu, rcNormal);
2237}
2238
2239
2240/**
2241 * Updates the EIP to point to the next instruction, but does not need to clear
2242 * EFLAGS.RF or CPUMCTX_INHIBIT_SHADOW nor check for debug flags.
2243 *
2244 * This is never called from 64-bit code.
2245 *
2246 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2247 * @param cbInstr The number of bytes to add.
2248 * @param rcNormal VINF_SUCCESS to continue TB.
2249 * VINF_IEM_REEXEC_BREAK to force TB exit when
2250 * taking the wrong conditional branch.
2251 */
2252DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToEip32AndFinishingNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
2253{
2254 pVCpu->cpum.GstCtx.rip = (uint32_t)(pVCpu->cpum.GstCtx.eip + cbInstr);
2255 return iemRegFinishNoFlags(pVCpu, rcNormal);
2256}
2257
2258
2259/**
2260 * Updates the IP to point to the next instruction, but does not need to clear
2261 * EFLAGS.RF or CPUMCTX_INHIBIT_SHADOW nor check for debug flags.
2262 *
2263 * This is only ever used from 16-bit code on a pre-386 CPU.
2264 *
2265 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2266 * @param cbInstr The number of bytes to add.
2267 * @param rcNormal VINF_SUCCESS to continue TB.
2268 * VINF_IEM_REEXEC_BREAK to force TB exit when
2269 * taking the wrong conditional branch.
2271 */
2272DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToIp16AndFinishingNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
2273{
2274 pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr);
2275 return iemRegFinishNoFlags(pVCpu, rcNormal);
2276}
2277
2278
2279/**
2280 * Adds an 8-bit signed jump offset to RIP from 64-bit code.
2281 *
2282 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2283 * segment limit.
2284 *
2285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2286 * @param cbInstr Instruction size.
2287 * @param offNextInstr The offset of the next instruction.
2288 * @param enmEffOpSize Effective operand size.
2289 * @param rcNormal VINF_SUCCESS to continue TB.
2290 * VINF_IEM_REEXEC_BREAK to force TB exit when
2291 * taking the wrong conditional branch.
2292 */
2293DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2294 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
2295{
2296 Assert(IEM_IS_64BIT_CODE(pVCpu));
2297 Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);
2298
2299 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2300 if (enmEffOpSize == IEMMODE_16BIT)
2301 uNewRip &= UINT16_MAX;
2302
2303 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2304 pVCpu->cpum.GstCtx.rip = uNewRip;
2305 else
2306 return iemRaiseGeneralProtectionFault0(pVCpu);
2307
2308#ifndef IEM_WITH_CODE_TLB
2309 iemOpcodeFlushLight(pVCpu, cbInstr);
2310#endif
2311
2312 /*
2313 * Clear RF and finish the instruction (maybe raise #DB).
2314 */
2315 return iemRegFinishClearingRF(pVCpu, rcNormal);
2316}
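

/* Illustrative sketch (compiled out): the IEMMODE_16BIT path above, e.g. a
   0x66 prefixed JMP rel8 (66 EB xx, cbInstr=3) executed at RIP=0x100ff0 in
   64-bit mode. Numbers are made up for illustration. */
#if 0
    pVCpu->cpum.GstCtx.rip = UINT64_C(0x100ff0);
    VBOXSTRICTRC rcStrict = iemRegRip64RelativeJumpS8AndFinishClearingRF(pVCpu, 3, 0x20 /*offNextInstr*/,
                                                                         IEMMODE_16BIT, VINF_SUCCESS);
    /* 0x100ff0 + 3 + 0x20 = 0x101013, masked with UINT16_MAX: RIP == 0x1013. */
    Assert(pVCpu->cpum.GstCtx.rip == UINT64_C(0x1013));
#endif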
2317
2318
2319/**
2320 * Adds an 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
2321 * code (never 64-bit).
2322 *
2323 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2324 * segment limit.
2325 *
2326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2327 * @param cbInstr Instruction size.
2328 * @param offNextInstr The offset of the next instruction.
2329 * @param enmEffOpSize Effective operand size.
2330 * @param rcNormal VINF_SUCCESS to continue TB.
2331 * VINF_IEM_REEXEC_BREAK to force TB exit when
2332 * taking the wrong conditional branch.
2333 */
2334DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2335 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
2336{
2337 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2338 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2339
2340 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
2341 if (enmEffOpSize == IEMMODE_16BIT)
2342 uNewEip &= UINT16_MAX;
2343 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2344 pVCpu->cpum.GstCtx.rip = uNewEip;
2345 else
2346 return iemRaiseGeneralProtectionFault0(pVCpu);
2347
2348#ifndef IEM_WITH_CODE_TLB
2349 iemOpcodeFlushLight(pVCpu, cbInstr);
2350#endif
2351
2352 /*
2353 * Clear RF and finish the instruction (maybe raise #DB).
2354 */
2355 return iemRegFinishClearingRF(pVCpu, rcNormal);
2356}
2357
2358
2359/**
2360 * Adds an 8-bit signed jump offset to IP, on a pre-386 CPU.
2361 *
2362 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2363 * segment limit.
2364 *
2365 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2366 * @param cbInstr Instruction size.
2367 * @param offNextInstr The offset of the next instruction.
2368 * @param rcNormal VINF_SUCCESS to continue TB.
2369 * VINF_IEM_REEXEC_BREAK to force TB exit when
2370 * taking the wrong conditional branch.
2371 */
2372DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegIp16RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2373 int8_t offNextInstr, int rcNormal) RT_NOEXCEPT
2374{
2375 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2376
2377 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
2378 if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
2379 pVCpu->cpum.GstCtx.rip = uNewIp;
2380 else
2381 return iemRaiseGeneralProtectionFault0(pVCpu);
2382
2383#ifndef IEM_WITH_CODE_TLB
2384 iemOpcodeFlushLight(pVCpu, cbInstr);
2385#endif
2386
2387 /*
2388 * Clear RF and finish the instruction (maybe raise #DB).
2389 */
2390 return iemRegFinishClearingRF(pVCpu, rcNormal);
2391}
2392
2393
2394/**
2395 * Adds an 8-bit signed jump offset to RIP from 64-bit code, no checking or
2396 * clearing of flags.
2397 *
2398 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2399 * segment limit.
2400 *
2401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2402 * @param cbInstr Instruction size.
2403 * @param offNextInstr The offset of the next instruction.
2404 * @param enmEffOpSize Effective operand size.
2405 * @param rcNormal VINF_SUCCESS to continue TB.
2406 * VINF_IEM_REEXEC_BREAK to force TB exit when
2407 * taking the wrong conditional branch.
2408 */
2409DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2410 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
2411{
2412 Assert(IEM_IS_64BIT_CODE(pVCpu));
2413 Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);
2414
2415 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2416 if (enmEffOpSize == IEMMODE_16BIT)
2417 uNewRip &= UINT16_MAX;
2418
2419 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2420 pVCpu->cpum.GstCtx.rip = uNewRip;
2421 else
2422 return iemRaiseGeneralProtectionFault0(pVCpu);
2423
2424#ifndef IEM_WITH_CODE_TLB
2425 iemOpcodeFlushLight(pVCpu, cbInstr);
2426#endif
2427 return iemRegFinishNoFlags(pVCpu, rcNormal);
2428}
2429
2430
2431/**
2432 * Adds an 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
2433 * code (never 64-bit), no checking or clearing of flags.
2434 *
2435 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2436 * segment limit.
2437 *
2438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2439 * @param cbInstr Instruction size.
2440 * @param offNextInstr The offset of the next instruction.
2441 * @param enmEffOpSize Effective operand size.
2442 * @param rcNormal VINF_SUCCESS to continue TB.
2443 * VINF_IEM_REEXEC_BREAK to force TB exit when
2444 * taking the wrong conditional branch.
2445 */
2446DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2447 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
2448{
2449 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2450 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2451
2452 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
2453 if (enmEffOpSize == IEMMODE_16BIT)
2454 uNewEip &= UINT16_MAX;
2455 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2456 pVCpu->cpum.GstCtx.rip = uNewEip;
2457 else
2458 return iemRaiseGeneralProtectionFault0(pVCpu);
2459
2460#ifndef IEM_WITH_CODE_TLB
2461 iemOpcodeFlushLight(pVCpu, cbInstr);
2462#endif
2463 return iemRegFinishNoFlags(pVCpu, rcNormal);
2464}
2465
2466
2467/**
2468 * Adds an 8-bit signed jump offset to IP, on a pre-386 CPU, no checking or
2469 * clearing of flags.
2470 *
2471 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2472 * segment limit.
2473 *
2474 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2475 * @param cbInstr Instruction size.
2476 * @param offNextInstr The offset of the next instruction.
2477 * @param rcNormal VINF_SUCCESS to continue TB.
2478 * VINF_IEM_REEXEC_BREAK to force TB exit when
2479 * taking the wrong conditional branch.
2480 */
2481DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegIp16RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
2482 int8_t offNextInstr, int rcNormal) RT_NOEXCEPT
2483{
2484 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2485
2486 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
2487 if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
2488 pVCpu->cpum.GstCtx.rip = uNewIp;
2489 else
2490 return iemRaiseGeneralProtectionFault0(pVCpu);
2491
2492#ifndef IEM_WITH_CODE_TLB
2493 iemOpcodeFlushLight(pVCpu, cbInstr);
2494#endif
2495 return iemRegFinishNoFlags(pVCpu, rcNormal);
2496}
2497
2498
2499/**
2500 * Adds a 16-bit signed jump offset to RIP from 64-bit code.
2501 *
2502 * @returns Strict VBox status code.
2503 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2504 * @param cbInstr Instruction size.
2505 * @param offNextInstr The offset of the next instruction.
2506 * @param rcNormal VINF_SUCCESS to continue TB.
2507 * VINF_IEM_REEXEC_BREAK to force TB exit when
2508 * taking the wrong conditional branch.
2509 */
2510DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2511 int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
2512{
2513 Assert(IEM_IS_64BIT_CODE(pVCpu));
2514
2515 pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr);
2516
2517#ifndef IEM_WITH_CODE_TLB
2518 iemOpcodeFlushLight(pVCpu, cbInstr);
2519#endif
2520
2521 /*
2522 * Clear RF and finish the instruction (maybe raise #DB).
2523 */
2524 return iemRegFinishClearingRF(pVCpu, rcNormal);
2525}
2526
2527
2528/**
2529 * Adds a 16-bit signed jump offset to EIP from 16-bit or 32-bit code.
2530 *
2531 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2532 * segment limit.
2533 *
2534 * @returns Strict VBox status code.
2535 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2536 * @param cbInstr Instruction size.
2537 * @param offNextInstr The offset of the next instruction.
2538 * @param rcNormal VINF_SUCCESS to continue TB.
2539 * VINF_IEM_REEXEC_BREAK to force TB exit when
2540 * taking the wrong conditional branch.
2541 *
2542 * @note This is also used by 16-bit code in pre-386 mode, as the code is
2543 * identical.
2544 */
2545DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2546 int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
2547{
2548 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2549
2550 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
2551 if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
2552 pVCpu->cpum.GstCtx.rip = uNewIp;
2553 else
2554 return iemRaiseGeneralProtectionFault0(pVCpu);
2555
2556#ifndef IEM_WITH_CODE_TLB
2557 iemOpcodeFlushLight(pVCpu, cbInstr);
2558#endif
2559
2560 /*
2561 * Clear RF and finish the instruction (maybe raise #DB).
2562 */
2563 return iemRegFinishClearingRF(pVCpu, rcNormal);
2564}
2565
2566
2567/**
2568 * Adds a 16-bit signed jump offset to RIP from 64-bit code, no checking or
2569 * clearing of flags.
2570 *
2571 * @returns Strict VBox status code.
2572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2573 * @param cbInstr Instruction size.
2574 * @param offNextInstr The offset of the next instruction.
2575 * @param rcNormal VINF_SUCCESS to continue TB.
2576 * VINF_IEM_REEXEC_BREAK to force TB exit when
2577 * taking the wrong conditional branch.
2578 */
2579DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
2580 int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
2581{
2582 Assert(IEM_IS_64BIT_CODE(pVCpu));
2583
2584 pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr);
2585
2586#ifndef IEM_WITH_CODE_TLB
2587 iemOpcodeFlushLight(pVCpu, cbInstr);
2588#endif
2589 return iemRegFinishNoFlags(pVCpu, rcNormal);
2590}
2591
2592
2593/**
2594 * Adds a 16-bit signed jump offset to EIP from 16-bit or 32-bit code,
2595 * no checking or clearing of flags.
2596 *
2597 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2598 * segment limit.
2599 *
2600 * @returns Strict VBox status code.
2601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2602 * @param cbInstr Instruction size.
2603 * @param offNextInstr The offset of the next instruction.
2604 * @param rcNormal VINF_SUCCESS to continue TB.
2605 * VINF_IEM_REEXEC_BREAK to force TB exit when
2606 * taking the wrong conditional branch.
2607 *
2608 * @note This is also used by 16-bit code in pre-386 mode, as the code is
2609 * identical.
2610 */
2611DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
2612 int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
2613{
2614 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2615
2616 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
2617 if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
2618 pVCpu->cpum.GstCtx.rip = uNewIp;
2619 else
2620 return iemRaiseGeneralProtectionFault0(pVCpu);
2621
2622#ifndef IEM_WITH_CODE_TLB
2623 iemOpcodeFlushLight(pVCpu, cbInstr);
2624#endif
2625 return iemRegFinishNoFlags(pVCpu, rcNormal);
2626}
2627
2628
2629/**
2630 * Adds a 32-bit signed jump offset to RIP from 64-bit code.
2631 *
2632 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2633 * segment limit.
2634 *
2635 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
2636 * only alternative for relative jumps in 64-bit code and that is already
2637 * handled in the decoder stage.
2638 *
2639 * @returns Strict VBox status code.
2640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2641 * @param cbInstr Instruction size.
2642 * @param offNextInstr The offset of the next instruction.
2643 * @param rcNormal VINF_SUCCESS to continue TB.
2644 * VINF_IEM_REEXEC_BREAK to force TB exit when
2645 * taking the wrong conditional branch.
2646 */
2647DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2648 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
2649{
2650 Assert(IEM_IS_64BIT_CODE(pVCpu));
2651
2652 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2653 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2654 pVCpu->cpum.GstCtx.rip = uNewRip;
2655 else
2656 return iemRaiseGeneralProtectionFault0(pVCpu);
2657
2658#ifndef IEM_WITH_CODE_TLB
2659 iemOpcodeFlushLight(pVCpu, cbInstr);
2660#endif
2661
2662 /*
2663 * Clear RF and finish the instruction (maybe raise #DB).
2664 */
2665 return iemRegFinishClearingRF(pVCpu, rcNormal);
2666}
2667
2668
2669/**
2670 * Adds a 32-bit signed jump offset to RIP from 64-bit code.
2671 *
2672 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2673 * segment limit.
2674 *
2675 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
2676 * only alternative for relative jumps in 32-bit code and that is already
2677 * handled in the decoder stage.
2678 *
2679 * @returns Strict VBox status code.
2680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2681 * @param cbInstr Instruction size.
2682 * @param offNextInstr The offset of the next instruction.
2683 * @param rcNormal VINF_SUCCESS to continue TB.
2684 * VINF_IEM_REEXEC_BREAK to force TB exit when
2685 * taking the wrong conditional branch.
2686 */
2687DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2688 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
2689{
2690 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2691 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
2692
2693 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
2694 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2695 pVCpu->cpum.GstCtx.rip = uNewEip;
2696 else
2697 return iemRaiseGeneralProtectionFault0(pVCpu);
2698
2699#ifndef IEM_WITH_CODE_TLB
2700 iemOpcodeFlushLight(pVCpu, cbInstr);
2701#endif
2702
2703 /*
2704 * Clear RF and finish the instruction (maybe raise #DB).
2705 */
2706 return iemRegFinishClearingRF(pVCpu, rcNormal);
2707}
2708
2709
2710/**
2711 * Adds a 32-bit signed jump offset to RIP from 64-bit code, no checking or
2712 * clearing of flags.
2713 *
2714 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2715 * segment limit.
2716 *
2717 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
2718 * only alternative for relative jumps in 64-bit code and that is already
2719 * handled in the decoder stage.
2720 *
2721 * @returns Strict VBox status code.
2722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2723 * @param cbInstr Instruction size.
2724 * @param offNextInstr The offset of the next instruction.
2725 * @param rcNormal VINF_SUCCESS to continue TB.
2726 * VINF_IEM_REEXEC_BREAK to force TB exit when
2727 * taking the wrong conditional branch.
2728 */
2729DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
2730 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
2731{
2732 Assert(IEM_IS_64BIT_CODE(pVCpu));
2733
2734 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2735 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2736 pVCpu->cpum.GstCtx.rip = uNewRip;
2737 else
2738 return iemRaiseGeneralProtectionFault0(pVCpu);
2739
2740#ifndef IEM_WITH_CODE_TLB
2741 iemOpcodeFlushLight(pVCpu, cbInstr);
2742#endif
2743 return iemRegFinishNoFlags(pVCpu, rcNormal);
2744}
2745
2746
2747/**
2748 * Adds a 32-bit signed jump offset to RIP from 64-bit code, no checking or
2749 * clearing of flags.
2750 *
2751 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2752 * segment limit.
2753 *
2754 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
2755 * only alternative for relative jumps in 32-bit code and that is already
2756 * handled in the decoder stage.
2757 *
2758 * @returns Strict VBox status code.
2759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2760 * @param cbInstr Instruction size.
2761 * @param offNextInstr The offset of the next instruction.
2762 * @param rcNormal VINF_SUCCESS to continue TB.
2763 * VINF_IEM_REEXEC_BREAK to force TB exit when
2764 * taking the wrong conditional branch.
2765 */
2766DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
2767 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
2768{
2769 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2770 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
2771
2772 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
2773 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2774 pVCpu->cpum.GstCtx.rip = uNewEip;
2775 else
2776 return iemRaiseGeneralProtectionFault0(pVCpu);
2777
2778#ifndef IEM_WITH_CODE_TLB
2779 iemOpcodeFlushLight(pVCpu, cbInstr);
2780#endif
2781 return iemRegFinishNoFlags(pVCpu, rcNormal);
2782}
2783
2784
2785/**
2786 * Extended version of iemFinishInstructionWithFlagsSet that goes with
2787 * iemRegAddToRipAndFinishingClearingRfEx.
2788 *
2789 * See iemFinishInstructionWithFlagsSet() for details.
2790 */
2791static VBOXSTRICTRC iemFinishInstructionWithTfSet(PVMCPUCC pVCpu) RT_NOEXCEPT
2792{
2793 /*
2794 * Raise a #DB.
2795 */
2796 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2797 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
2798 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS
2799 | (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK) >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2800 /** @todo Do we set all pending \#DB events, or just one? */
2801 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64 (popf)\n",
2802 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
2803 pVCpu->cpum.GstCtx.rflags.uBoth));
2804 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
2805 return iemRaiseDebugException(pVCpu);
2806}
2807
2808
2809/**
2810 * Extended version of iemRegAddToRipAndFinishingClearingRF for use by POPF and
2811 * others potentially updating EFLAGS.TF.
2812 *
2813 * The single step event must be generated using the TF value at the start of
2814 * the instruction, not the new value set by it.
2815 *
2816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2817 * @param cbInstr The number of bytes to add.
2818 * @param fEflOld The EFLAGS at the start of the instruction
2819 * execution.
2820 */
2821DECLINLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRfEx(PVMCPUCC pVCpu, uint8_t cbInstr, uint32_t fEflOld) RT_NOEXCEPT
2822{
2823 iemRegAddToRip(pVCpu, cbInstr);
2824 if (!(fEflOld & X86_EFL_TF))
2825 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2826 return iemFinishInstructionWithTfSet(pVCpu);
2827}
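

/* Illustrative sketch (compiled out): why a POPF implementation must pass
   the EFLAGS value captured *before* applying the popped flags. Setting TF
   must not trap on the POPF itself, while clearing TF during single stepping
   still must; cbInstr and the pop itself are assumed/simplified here. */
#if 0
    uint32_t const fEflOld = pVCpu->cpum.GstCtx.eflags.u;
    /* ... pop the new EFLAGS value and apply it to the guest context ... */
    return iemRegAddToRipAndFinishingClearingRfEx(pVCpu, cbInstr, fEflOld);
#endif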
2828
2829
2830#ifndef IEM_WITH_OPAQUE_DECODER_STATE
2831/**
2832 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
2833 *
2834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2835 */
2836DECLINLINE(VBOXSTRICTRC) iemRegUpdateRipAndFinishClearingRF(PVMCPUCC pVCpu) RT_NOEXCEPT
2837{
2838 return iemRegAddToRipAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
2839}
2840#endif
2841
2842
2843#ifdef IEM_WITH_CODE_TLB
2844
2845/**
2846 * Performs a near jump to the specified address, no checking or clearing of
2847 * flags.
2848 *
2849 * May raise a \#GP(0) if the new IP is outside the code segment limit.
2850 *
2851 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2852 * @param uNewIp The new IP value.
2853 */
2854DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU16AndFinishNoFlags(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
2855{
2856 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
2857 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
2858 pVCpu->cpum.GstCtx.rip = uNewIp;
2859 else
2860 return iemRaiseGeneralProtectionFault0(pVCpu);
2861 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
2862}
2863
2864
2865/**
2866 * Performs a near jump to the specified address, no checking or clearing of
2867 * flags.
2868 *
2869 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
2870 *
2871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2872 * @param uNewEip The new EIP value.
2873 */
2874DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU32AndFinishNoFlags(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
2875{
2876 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
2877 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2878 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2879 pVCpu->cpum.GstCtx.rip = uNewEip;
2880 else
2881 return iemRaiseGeneralProtectionFault0(pVCpu);
2882 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
2883}
2884
2885
2886/**
2887 * Performs a near jump to the specified address, no checking or clearing of
2888 * flags.
2889 *
2890 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2891 * segment limit.
2892 *
2893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2894 * @param uNewRip The new RIP value.
2895 */
2896DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU64AndFinishNoFlags(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
2897{
2898 Assert(IEM_IS_64BIT_CODE(pVCpu));
2899 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2900 pVCpu->cpum.GstCtx.rip = uNewRip;
2901 else
2902 return iemRaiseGeneralProtectionFault0(pVCpu);
2903 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
2904}
2905
2906#endif /* IEM_WITH_CODE_TLB */
2907
2908/**
2909 * Performs a near jump to the specified address.
2910 *
2911 * May raise a \#GP(0) if the new IP is outside the code segment limit.
2912 *
2913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2914 * @param uNewIp The new IP value.
2915 * @param cbInstr The instruction length, for flushing in the non-TLB case.
2916 */
2917DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU16AndFinishClearingRF(PVMCPUCC pVCpu, uint16_t uNewIp, uint8_t cbInstr) RT_NOEXCEPT
2918{
2919 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
2920 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
2921 pVCpu->cpum.GstCtx.rip = uNewIp;
2922 else
2923 return iemRaiseGeneralProtectionFault0(pVCpu);
2924#ifndef IEM_WITH_CODE_TLB
2925 iemOpcodeFlushLight(pVCpu, cbInstr);
2926#else
2927 RT_NOREF_PV(cbInstr);
2928#endif
2929 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2930}
2931
2932
2933/**
2934 * Performs a near jump to the specified address.
2935 *
2936 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
2937 *
2938 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2939 * @param uNewEip The new EIP value.
2940 * @param cbInstr The instruction length, for flushing in the non-TLB case.
2941 */
2942DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU32AndFinishClearingRF(PVMCPUCC pVCpu, uint32_t uNewEip, uint8_t cbInstr) RT_NOEXCEPT
2943{
2944 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
2945 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2946 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2947 pVCpu->cpum.GstCtx.rip = uNewEip;
2948 else
2949 return iemRaiseGeneralProtectionFault0(pVCpu);
2950#ifndef IEM_WITH_CODE_TLB
2951 iemOpcodeFlushLight(pVCpu, cbInstr);
2952#else
2953 RT_NOREF_PV(cbInstr);
2954#endif
2955 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2956}
2957
2958
2959/**
2960 * Performs a near jump to the specified address.
2961 *
2962 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2963 * segment limit.
2964 *
2965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2966 * @param uNewRip The new RIP value.
2967 * @param cbInstr The instruction length, for flushing in the non-TLB case.
2968 */
2969DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU64AndFinishClearingRF(PVMCPUCC pVCpu, uint64_t uNewRip, uint8_t cbInstr) RT_NOEXCEPT
2970{
2971 Assert(IEM_IS_64BIT_CODE(pVCpu));
2972 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2973 pVCpu->cpum.GstCtx.rip = uNewRip;
2974 else
2975 return iemRaiseGeneralProtectionFault0(pVCpu);
2976#ifndef IEM_WITH_CODE_TLB
2977 iemOpcodeFlushLight(pVCpu, cbInstr);
2978#else
2979 RT_NOREF_PV(cbInstr);
2980#endif
2981 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2982}
2983
2984
2985
2986/**
2987 * Adds to the stack pointer.
2988 *
2989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2990 * @param cbToAdd The number of bytes to add (8-bit!).
2991 */
2992DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd) RT_NOEXCEPT
2993{
2994 if (IEM_IS_64BIT_CODE(pVCpu))
2995 pVCpu->cpum.GstCtx.rsp += cbToAdd;
2996 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2997 pVCpu->cpum.GstCtx.esp += cbToAdd;
2998 else
2999 pVCpu->cpum.GstCtx.sp += cbToAdd;
3000}
3001
3002
3003/**
3004 * Subtracts from the stack pointer.
3005 *
3006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3007 * @param cbToSub The number of bytes to subtract (8-bit!).
3008 */
3009DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub) RT_NOEXCEPT
3010{
3011 if (IEM_IS_64BIT_CODE(pVCpu))
3012 pVCpu->cpum.GstCtx.rsp -= cbToSub;
3013 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3014 pVCpu->cpum.GstCtx.esp -= cbToSub;
3015 else
3016 pVCpu->cpum.GstCtx.sp -= cbToSub;
3017}
3018
3019
3020/**
3021 * Adds to the temporary stack pointer.
3022 *
3023 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3024 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3025 * @param cbToAdd The number of bytes to add (16-bit).
3026 */
3027DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd) RT_NOEXCEPT
3028{
3029 if (IEM_IS_64BIT_CODE(pVCpu))
3030 pTmpRsp->u += cbToAdd;
3031 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3032 pTmpRsp->DWords.dw0 += cbToAdd;
3033 else
3034 pTmpRsp->Words.w0 += cbToAdd;
3035}
3036
3037
3038/**
3039 * Subtracts from the temporary stack pointer.
3040 *
3041 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3042 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3043 * @param cbToSub The number of bytes to subtract.
3044 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
3045 * expecting that.
3046 */
3047DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub) RT_NOEXCEPT
3048{
3049 if (IEM_IS_64BIT_CODE(pVCpu))
3050 pTmpRsp->u -= cbToSub;
3051 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3052 pTmpRsp->DWords.dw0 -= cbToSub;
3053 else
3054 pTmpRsp->Words.w0 -= cbToSub;
3055}
3056
3057
3058/**
3059 * Calculates the effective stack address for a push of the specified size as
3060 * well as the new RSP value (upper bits may be masked).
3061 *
3062 * @returns Effective stack address for the push.
3063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3064 * @param cbItem The size of the stack item to push.
3065 * @param puNewRsp Where to return the new RSP value.
3066 */
3067DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
3068{
3069 RTUINT64U uTmpRsp;
3070 RTGCPTR GCPtrTop;
3071 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
3072
3073 if (IEM_IS_64BIT_CODE(pVCpu))
3074 GCPtrTop = uTmpRsp.u -= cbItem;
3075 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3076 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
3077 else
3078 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
3079 *puNewRsp = uTmpRsp.u;
3080 return GCPtrTop;
3081}
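

/* Illustrative sketch (compiled out): a push on a 16-bit stack (not 64-bit
   code, SS.B clear). Only SP is decremented, so pushing 2 bytes at SP=0
   wraps to 0xFFFE while the upper RSP bits stay put. Values made up. */
#if 0
    uint64_t uNewRsp;
    pVCpu->cpum.GstCtx.rsp = UINT64_C(0x12340000);
    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
    Assert(GCPtrTop == 0xfffe && uNewRsp == UINT64_C(0x1234fffe));
#endif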
3082
3083
3084/**
3085 * Gets the current stack pointer and calculates the value after a pop of the
3086 * specified size.
3087 *
3088 * @returns Current stack pointer.
3089 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3090 * @param cbItem The size of the stack item to pop.
3091 * @param puNewRsp Where to return the new RSP value.
3092 */
3093DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
3094{
3095 RTUINT64U uTmpRsp;
3096 RTGCPTR GCPtrTop;
3097 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
3098
3099 if (IEM_IS_64BIT_CODE(pVCpu))
3100 {
3101 GCPtrTop = uTmpRsp.u;
3102 uTmpRsp.u += cbItem;
3103 }
3104 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3105 {
3106 GCPtrTop = uTmpRsp.DWords.dw0;
3107 uTmpRsp.DWords.dw0 += cbItem;
3108 }
3109 else
3110 {
3111 GCPtrTop = uTmpRsp.Words.w0;
3112 uTmpRsp.Words.w0 += cbItem;
3113 }
3114 *puNewRsp = uTmpRsp.u;
3115 return GCPtrTop;
3116}
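

/* Illustrative sketch (compiled out): the pop counterpart returns the
   current top of stack first and advances the suitably sized stack pointer
   afterwards. Same 16-bit SS assumptions as the push sketch above. */
#if 0
    uint64_t uNewRsp;
    pVCpu->cpum.GstCtx.rsp = UINT64_C(0x1234fffe);
    RTGCPTR const GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
    Assert(GCPtrTop == 0xfffe && uNewRsp == UINT64_C(0x12340000));
#endif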
3117
3118
3119/**
3120 * Calculates the effective stack address for a push of the specified size as
3121 * well as the new temporary RSP value (upper bits may be masked).
3122 *
3123 * @returns Effective stack address for the push.
3124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3125 * @param pTmpRsp The temporary stack pointer. This is updated.
3126 * @param cbItem The size of the stack item to push.
3127 */
3128DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
3129{
3130 RTGCPTR GCPtrTop;
3131
3132 if (IEM_IS_64BIT_CODE(pVCpu))
3133 GCPtrTop = pTmpRsp->u -= cbItem;
3134 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3135 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
3136 else
3137 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
3138 return GCPtrTop;
3139}
3140
3141
3142/**
3143 * Gets the effective stack address for a pop of the specified size and
3144 * calculates and updates the temporary RSP.
3145 *
3146 * @returns Current stack pointer.
3147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3148 * @param pTmpRsp The temporary stack pointer. This is updated.
3149 * @param cbItem The size of the stack item to pop.
3150 */
3151DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
3152{
3153 RTGCPTR GCPtrTop;
3154 if (IEM_IS_64BIT_CODE(pVCpu))
3155 {
3156 GCPtrTop = pTmpRsp->u;
3157 pTmpRsp->u += cbItem;
3158 }
3159 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3160 {
3161 GCPtrTop = pTmpRsp->DWords.dw0;
3162 pTmpRsp->DWords.dw0 += cbItem;
3163 }
3164 else
3165 {
3166 GCPtrTop = pTmpRsp->Words.w0;
3167 pTmpRsp->Words.w0 += cbItem;
3168 }
3169 return GCPtrTop;
3170}
3171
3172/** @} */
3173
3174
3175/** @name FPU access and helpers.
3176 *
3177 * @{
3178 */
3179
3180
3181/**
3182 * Hook for preparing to use the host FPU.
3183 *
3184 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3185 *
3186 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3187 */
3188DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu) RT_NOEXCEPT
3189{
3190#ifdef IN_RING3
3191 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
3192#else
3193 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
3194#endif
3195 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
3196}
3197
3198
3199/**
3200 * Hook for preparing to use the host FPU for SSE.
3201 *
3202 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3203 *
3204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3205 */
3206DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu) RT_NOEXCEPT
3207{
3208 iemFpuPrepareUsage(pVCpu);
3209}
3210
3211
3212/**
3213 * Hook for preparing to use the host FPU for AVX.
3214 *
3215 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3216 *
3217 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3218 */
3219DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu) RT_NOEXCEPT
3220{
3221 iemFpuPrepareUsage(pVCpu);
3222}
3223
3224
3225/**
3226 * Hook for actualizing the guest FPU state before the interpreter reads it.
3227 *
3228 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3229 *
3230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3231 */
3232DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
3233{
3234#ifdef IN_RING3
3235 NOREF(pVCpu);
3236#else
3237 CPUMRZFpuStateActualizeForRead(pVCpu);
3238#endif
3239 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
3240}
3241
3242
3243/**
3244 * Hook for actualizing the guest FPU state before the interpreter changes it.
3245 *
3246 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3247 *
3248 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3249 */
3250DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
3251{
3252#ifdef IN_RING3
3253 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
3254#else
3255 CPUMRZFpuStateActualizeForChange(pVCpu);
3256#endif
3257 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
3258}
3259
3260
3261/**
3262 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
3263 * only.
3264 *
3265 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3266 *
3267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3268 */
3269DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
3270{
3271#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
3272 NOREF(pVCpu);
3273#else
3274 CPUMRZFpuStateActualizeSseForRead(pVCpu);
3275#endif
3276 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
3277}
3278
3279
3280/**
3281 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
3282 * read+write.
3283 *
3284 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3285 *
3286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3287 */
3288DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
3289{
3290#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
3291 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
3292#else
3293 CPUMRZFpuStateActualizeForChange(pVCpu);
3294#endif
3295 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
3296
3297 /* Make sure any changes are loaded the next time around. */
3298 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_SSE;
3299}
3300
3301
3302/**
3303 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
3304 * only.
3305 *
3306 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3307 *
3308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3309 */
3310DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
3311{
3312#ifdef IN_RING3
3313 NOREF(pVCpu);
3314#else
3315 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
3316#endif
3317 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
3318}
3319
3320
3321/**
3322 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
3323 * read+write.
3324 *
3325 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
3326 *
3327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3328 */
3329DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
3330{
3331#ifdef IN_RING3
3332 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
3333#else
3334 CPUMRZFpuStateActualizeForChange(pVCpu);
3335#endif
3336 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
3337
3338 /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
3339 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
3340}
3341
3342
3343/**
3344 * Stores a QNaN value into a FPU register.
3345 *
3346 * @param pReg Pointer to the register.
3347 */
3348DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg) RT_NOEXCEPT
3349{
3350 pReg->au32[0] = UINT32_C(0x00000000);
3351 pReg->au32[1] = UINT32_C(0xc0000000);
3352 pReg->au16[4] = UINT16_C(0xffff);
3353}
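
/* Note: the three stores above produce the x87 "real indefinite" QNaN,
 * 0xFFFF'C000000000000000 (sign=1, exponent all ones, mantissa=0xC000000000000000),
 * i.e. the value hardware delivers for masked invalid-operation exceptions. */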
3354
3355
3356/**
3357 * Updates the FOP, FPU.CS and FPUIP registers, extended version.
3358 *
3359 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3360 * @param pFpuCtx The FPU context.
3361 * @param uFpuOpcode The FPU opcode value (see IEMCPU::uFpuOpcode).
3362 */
3363DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorkerEx(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint16_t uFpuOpcode) RT_NOEXCEPT
3364{
3365 Assert(uFpuOpcode != UINT16_MAX);
3366 pFpuCtx->FOP = uFpuOpcode;
3367 /** @todo x87.CS and FPUIP need to be kept separately. */
3368 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
3369 {
3370 /** @todo Testcase: we make assumptions here about how FPUIP and FPUDP are
3371 * handled in real mode, based on the fnsave and fnstenv images. */
3372 pFpuCtx->CS = 0;
3373 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
3374 }
3375 else if (!IEM_IS_LONG_MODE(pVCpu))
3376 {
3377 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
3378 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
3379 }
3380 else
3381 *(uint64_t *)&pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
3382}
3383
3384
3385/**
3386 * Marks the specified stack register as free (for FFREE).
3387 *
3388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3389 * @param iStReg The register to free.
3390 */
3391DECLINLINE(void) iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
3392{
3393 Assert(iStReg < 8);
3394 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3395 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
3396 pFpuCtx->FTW &= ~RT_BIT(iReg);
3397}
3398
3399
3400/**
3401 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
3402 *
3403 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3404 */
3405DECLINLINE(void) iemFpuStackIncTop(PVMCPUCC pVCpu) RT_NOEXCEPT
3406{
3407 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3408 uint16_t uFsw = pFpuCtx->FSW;
3409 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
3410 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
3411 uFsw &= ~X86_FSW_TOP_MASK;
3412 uFsw |= uTop;
3413 pFpuCtx->FSW = uFsw;
3414}
3415
3416
3417/**
3418 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
3419 *
3420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3421 */
3422DECLINLINE(void) iemFpuStackDecTop(PVMCPUCC pVCpu) RT_NOEXCEPT
3423{
3424 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3425 uint16_t uFsw = pFpuCtx->FSW;
3426 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
3427 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
3428 uFsw &= ~X86_FSW_TOP_MASK;
3429 uFsw |= uTop;
3430 pFpuCtx->FSW = uFsw;
3431}
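
/* Note: adding 7 to the 3-bit TOP field is the same as subtracting 1 modulo 8
 * (e.g. TOP=0 wraps to 7), which avoids a borrow out of the field. */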
3432
3433
3434
3435
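/**
 * Checks whether the given FPU stack register is empty.
 *
 * @returns VINF_SUCCESS if the register holds a value, VERR_NOT_FOUND if empty.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg The stack register to check (relative to TOP).
 */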
3436DECLINLINE(int) iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
3437{
3438 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3439 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
3440 if (pFpuCtx->FTW & RT_BIT(iReg))
3441 return VINF_SUCCESS;
3442 return VERR_NOT_FOUND;
3443}
3444
3445
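/**
 * Checks whether the given FPU stack register is empty, returning a const
 * reference to its value when it is not.
 *
 * @returns VINF_SUCCESS or VERR_NOT_FOUND.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg The stack register to check (relative to TOP).
 * @param ppRef Where to return the pointer to the register value.
 */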
3446DECLINLINE(int) iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef) RT_NOEXCEPT
3447{
3448 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3449 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
3450 if (pFpuCtx->FTW & RT_BIT(iReg))
3451 {
3452 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
3453 return VINF_SUCCESS;
3454 }
3455 return VERR_NOT_FOUND;
3456}
3457
3458
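/**
 * Checks whether the two given FPU stack registers are both non-empty,
 * returning const references to their values.
 *
 * @returns VINF_SUCCESS if both are occupied, VERR_NOT_FOUND otherwise.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg0 The first stack register (relative to TOP).
 * @param ppRef0 Where to return the pointer to the first value.
 * @param iStReg1 The second stack register (relative to TOP).
 * @param ppRef1 Where to return the pointer to the second value.
 */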
3459DECLINLINE(int) iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
3460 uint8_t iStReg1, PCRTFLOAT80U *ppRef1) RT_NOEXCEPT
3461{
3462 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3463 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
3464 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
3465 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
3466 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
3467 {
3468 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
3469 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
3470 return VINF_SUCCESS;
3471 }
3472 return VERR_NOT_FOUND;
3473}
3474
3475
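/**
 * Checks whether the two given FPU stack registers are both non-empty,
 * returning a const reference to the first value only.
 *
 * @returns VINF_SUCCESS if both are occupied, VERR_NOT_FOUND otherwise.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg0 The first stack register (relative to TOP).
 * @param ppRef0 Where to return the pointer to the first value.
 * @param iStReg1 The second stack register (relative to TOP).
 */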
3476DECLINLINE(int) iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1) RT_NOEXCEPT
3477{
3478 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3479 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
3480 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
3481 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
3482 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
3483 {
3484 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
3485 return VINF_SUCCESS;
3486 }
3487 return VERR_NOT_FOUND;
3488}
3489
3490
3491/**
3492 * Rotates the stack registers when setting a new TOS.
3493 *
3494 * @param pFpuCtx The FPU context.
3495 * @param iNewTop New TOS value.
3496 * @remarks We only do this to speed up fxsave/fxrstor which
3497 * arrange the FP registers in stack order.
3498 * MUST be done before writing the new TOS (FSW).
3499 */
3500DECLINLINE(void) iemFpuRotateStackSetTop(PX86FXSTATE pFpuCtx, uint16_t iNewTop) RT_NOEXCEPT
3501{
3502 uint16_t iOldTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
3503 RTFLOAT80U ar80Temp[8];
3504
3505 if (iOldTop == iNewTop)
3506 return;
3507
3508 /* Unscrew the stack and get it into 'native' order. */
3509 ar80Temp[0] = pFpuCtx->aRegs[(8 - iOldTop + 0) & X86_FSW_TOP_SMASK].r80;
3510 ar80Temp[1] = pFpuCtx->aRegs[(8 - iOldTop + 1) & X86_FSW_TOP_SMASK].r80;
3511 ar80Temp[2] = pFpuCtx->aRegs[(8 - iOldTop + 2) & X86_FSW_TOP_SMASK].r80;
3512 ar80Temp[3] = pFpuCtx->aRegs[(8 - iOldTop + 3) & X86_FSW_TOP_SMASK].r80;
3513 ar80Temp[4] = pFpuCtx->aRegs[(8 - iOldTop + 4) & X86_FSW_TOP_SMASK].r80;
3514 ar80Temp[5] = pFpuCtx->aRegs[(8 - iOldTop + 5) & X86_FSW_TOP_SMASK].r80;
3515 ar80Temp[6] = pFpuCtx->aRegs[(8 - iOldTop + 6) & X86_FSW_TOP_SMASK].r80;
3516 ar80Temp[7] = pFpuCtx->aRegs[(8 - iOldTop + 7) & X86_FSW_TOP_SMASK].r80;
3517
3518 /* Now rotate the stack to the new position. */
3519 pFpuCtx->aRegs[0].r80 = ar80Temp[(iNewTop + 0) & X86_FSW_TOP_SMASK];
3520 pFpuCtx->aRegs[1].r80 = ar80Temp[(iNewTop + 1) & X86_FSW_TOP_SMASK];
3521 pFpuCtx->aRegs[2].r80 = ar80Temp[(iNewTop + 2) & X86_FSW_TOP_SMASK];
3522 pFpuCtx->aRegs[3].r80 = ar80Temp[(iNewTop + 3) & X86_FSW_TOP_SMASK];
3523 pFpuCtx->aRegs[4].r80 = ar80Temp[(iNewTop + 4) & X86_FSW_TOP_SMASK];
3524 pFpuCtx->aRegs[5].r80 = ar80Temp[(iNewTop + 5) & X86_FSW_TOP_SMASK];
3525 pFpuCtx->aRegs[6].r80 = ar80Temp[(iNewTop + 6) & X86_FSW_TOP_SMASK];
3526 pFpuCtx->aRegs[7].r80 = ar80Temp[(iNewTop + 7) & X86_FSW_TOP_SMASK];
3527}
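
/* Worked example: IEM keeps aRegs[i] == ST(i), so physical register p holds
 * aRegs[(p - TOP) & 7]. With iOldTop=2 the first block rebuilds physical order
 * (ar80Temp[p] = aRegs[(8 - 2 + p) & 7]); with iNewTop=5 the second block then
 * makes aRegs[0] pick up physical register 5 via ar80Temp[(5 + 0) & 7]. */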
3528
3529
3530/**
3531 * Updates the FPU exception status after FCW is changed.
3532 *
3533 * @param pFpuCtx The FPU context.
3534 */
3535DECLINLINE(void) iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
3536{
3537 uint16_t u16Fsw = pFpuCtx->FSW;
3538 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
3539 u16Fsw |= X86_FSW_ES | X86_FSW_B;
3540 else
3541 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
3542 pFpuCtx->FSW = u16Fsw;
3543}
3544
3545
3546/**
3547 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
3548 *
3549 * @returns The full FTW.
3550 * @param pFpuCtx The FPU context.
3551 */
3552DECLINLINE(uint16_t) iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx) RT_NOEXCEPT
3553{
3554 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
3555 uint16_t u16Ftw = 0;
3556 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
3557 for (unsigned iSt = 0; iSt < 8; iSt++)
3558 {
3559 unsigned const iReg = (iSt + iTop) & 7;
3560 if (!(u8Ftw & RT_BIT(iReg)))
3561 u16Ftw |= 3 << (iReg * 2); /* empty */
3562 else
3563 {
3564 uint16_t uTag;
3565 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
3566 if (pr80Reg->s.uExponent == 0x7fff)
3567 uTag = 2; /* Exponent is all 1's => Special. */
3568 else if (pr80Reg->s.uExponent == 0x0000)
3569 {
3570 if (pr80Reg->s.uMantissa == 0x0000)
3571 uTag = 1; /* All bits are zero => Zero. */
3572 else
3573 uTag = 2; /* Must be special. */
3574 }
3575 else if (pr80Reg->s.uMantissa & RT_BIT_64(63)) /* The J bit. */
3576 uTag = 0; /* Valid. */
3577 else
3578 uTag = 2; /* Must be special. */
3579
3580 u16Ftw |= uTag << (iReg * 2);
3581 }
3582 }
3583
3584 return u16Ftw;
3585}
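
/* The tag encodings produced above follow the architectural FTW format:
 * 00 = valid, 01 = zero, 10 = special (NaN, infinity, denormal or unnormal),
 * 11 = empty; a fully empty stack thus yields 0xffff. */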
3586
3587
3588/**
3589 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
3590 *
3591 * @returns The compressed FTW.
3592 * @param u16FullFtw The full FTW to convert.
3593 */
3594DECLINLINE(uint16_t) iemFpuCompressFtw(uint16_t u16FullFtw) RT_NOEXCEPT
3595{
3596 uint8_t u8Ftw = 0;
3597 for (unsigned i = 0; i < 8; i++)
3598 {
3599 if ((u16FullFtw & 3) != 3 /*empty*/)
3600 u8Ftw |= RT_BIT(i);
3601 u16FullFtw >>= 2;
3602 }
3603
3604 return u8Ftw;
3605}
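
/* Example: a full FTW of 0xffff (all empty) compresses to 0x00, while 0xfffe
 * (only physical register 0 occupied) compresses to 0x01. */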
3606
3607/** @} */
3608
3609
3610/** @name Memory access.
3611 *
3612 * @{
3613 */
3614
3615
3616/**
3617 * Checks whether alignment checks are enabled or not.
3618 *
3619 * @returns true if enabled, false if not.
3620 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3621 */
3622DECLINLINE(bool) iemMemAreAlignmentChecksEnabled(PVMCPUCC pVCpu) RT_NOEXCEPT
3623{
3624 AssertCompile(X86_CR0_AM == X86_EFL_AC);
3625 return IEM_GET_CPL(pVCpu) == 3
3626 && (((uint32_t)pVCpu->cpum.GstCtx.cr0 & pVCpu->cpum.GstCtx.eflags.u) & X86_CR0_AM);
3627}
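
/* Since X86_CR0_AM and X86_EFL_AC are the same bit (18), ANDing CR0 with
 * EFLAGS above yields a non-zero result exactly when both the global enable
 * (CR0.AM) and the per-thread flag (EFLAGS.AC) are set. */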
3628
3629/**
3630 * Checks if the given segment can be written to, raising the appropriate
3631 * exception if not.
3632 *
3633 * @returns VBox strict status code.
3634 *
3635 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3636 * @param pHid Pointer to the hidden register.
3637 * @param iSegReg The register number.
3638 * @param pu64BaseAddr Where to return the base address to use for the
3639 * segment. (In 64-bit code it may differ from the
3640 * base in the hidden segment.)
3641 */
3642DECLINLINE(VBOXSTRICTRC) iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
3643 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
3644{
3645 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3646
3647 if (IEM_IS_64BIT_CODE(pVCpu))
3648 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
3649 else
3650 {
3651 if (!pHid->Attr.n.u1Present)
3652 {
3653 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
3654 AssertRelease(uSel == 0);
3655 LogEx(LOG_GROUP_IEM,("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
3656 return iemRaiseGeneralProtectionFault0(pVCpu);
3657 }
3658
3659 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
3660 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
3661 && !IEM_IS_64BIT_CODE(pVCpu) )
3662 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
3663 *pu64BaseAddr = pHid->u64Base;
3664 }
3665 return VINF_SUCCESS;
3666}
3667
3668
3669/**
3670 * Checks if the given segment can be read from, raising the appropriate
3671 * exception if not.
3672 *
3673 * @returns VBox strict status code.
3674 *
3675 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3676 * @param pHid Pointer to the hidden register.
3677 * @param iSegReg The register number.
3678 * @param pu64BaseAddr Where to return the base address to use for the
3679 * segment. (In 64-bit code it may differ from the
3680 * base in the hidden segment.)
3681 */
3682DECLINLINE(VBOXSTRICTRC) iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
3683 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
3684{
3685 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3686
3687 if (IEM_IS_64BIT_CODE(pVCpu))
3688 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
3689 else
3690 {
3691 if (!pHid->Attr.n.u1Present)
3692 {
3693 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
3694 AssertRelease(uSel == 0);
3695 LogEx(LOG_GROUP_IEM,("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
3696 return iemRaiseGeneralProtectionFault0(pVCpu);
3697 }
3698
3699 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3700 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
3701 *pu64BaseAddr = pHid->u64Base;
3702 }
3703 return VINF_SUCCESS;
3704}
3705
3706
3707/**
3708 * Maps a physical page.
3709 *
3710 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
3711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3712 * @param GCPhysMem The physical address.
3713 * @param fAccess The intended access.
3714 * @param ppvMem Where to return the mapping address.
3715 * @param pLock The PGM lock.
3716 */
3717DECLINLINE(int) iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
3718 void **ppvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
3719{
3720#ifdef IEM_LOG_MEMORY_WRITES
3721 if (fAccess & IEM_ACCESS_TYPE_WRITE)
3722 return VERR_PGM_PHYS_TLB_CATCH_ALL;
3723#endif
3724
3725 /** @todo This API may require some improvement later. A private deal with PGM
3726 * regarding locking and unlocking needs to be struck. A couple of TLBs
3727 * living in PGM, but with publicly accessible inlined access methods
3728 * could perhaps be an even better solution. */
3729 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
3730 GCPhysMem,
3731 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
3732 RT_BOOL(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS),
3733 ppvMem,
3734 pLock);
3735 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
3736 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
3737
3738 return rc;
3739}
3740
3741
3742/**
3743 * Unmap a page previously mapped by iemMemPageMap.
3744 *
3745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3746 * @param GCPhysMem The physical address.
3747 * @param fAccess The intended access.
3748 * @param pvMem What iemMemPageMap returned.
3749 * @param pLock The PGM lock.
3750 */
3751DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
3752 const void *pvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
3753{
3754 NOREF(pVCpu);
3755 NOREF(GCPhysMem);
3756 NOREF(fAccess);
3757 NOREF(pvMem);
3758 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
3759}
3760
3761#ifdef IEM_WITH_SETJMP
3762
3763/** @todo slim this down */
3764DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg,
3765 size_t cbMem, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
3766{
3767 Assert(cbMem >= 1);
3768 Assert(iSegReg < X86_SREG_COUNT);
3769
3770 /*
3771 * 64-bit mode is simpler.
3772 */
3773 if (IEM_IS_64BIT_CODE(pVCpu))
3774 {
3775 if (iSegReg >= X86_SREG_FS && iSegReg != UINT8_MAX)
3776 {
3777 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3778 PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
3779 GCPtrMem += pSel->u64Base;
3780 }
3781
3782 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
3783 return GCPtrMem;
3784 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
3785 }
3786 /*
3787 * 16-bit and 32-bit segmentation.
3788 */
3789 else if (iSegReg != UINT8_MAX)
3790 {
3791 /** @todo Does this apply to segments with 4G-1 limit? */
3792 uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
3793 if (RT_LIKELY(GCPtrLast32 >= (uint32_t)GCPtrMem))
3794 {
3795 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3796 PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
3797 switch (pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
3798 | X86_SEL_TYPE_READ | X86_SEL_TYPE_WRITE /* same as read */
3799 | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_CONF /* same as down */
3800 | X86_SEL_TYPE_CODE))
3801 {
3802 case X86DESCATTR_P: /* readonly data, expand up */
3803 case X86DESCATTR_P | X86_SEL_TYPE_WRITE: /* writable data, expand up */
3804 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ: /* code, read-only */
3805 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_CONF: /* conforming code, read-only */
3806 /* expand up */
3807 if (RT_LIKELY(GCPtrLast32 <= pSel->u32Limit))
3808 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
3809 Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x vs %#x\n",
3810 (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit));
3811 break;
3812
3813 case X86DESCATTR_P | X86_SEL_TYPE_DOWN: /* readonly data, expand down */
3814 case X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_WRITE: /* writable data, expand down */
3815 /* expand down */
3816 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
3817 && ( pSel->Attr.n.u1DefBig
3818 || GCPtrLast32 <= UINT32_C(0xffff)) ))
3819 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
3820 Log10(("iemMemApplySegmentToReadJmp: expand down out of bounds %#x..%#x vs %#x..%#x\n",
3821 (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit, pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT16_MAX));
3822 break;
3823
3824 default:
3825 Log10(("iemMemApplySegmentToReadJmp: bad selector %#x\n", pSel->Attr.u));
3826 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
3827 break;
3828 }
3829 }
3830 Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x\n",(uint32_t)GCPtrMem, GCPtrLast32));
3831 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
3832 }
3833 /*
3834 * 32-bit flat address.
3835 */
3836 else
3837 return GCPtrMem;
3838}
3839
3840
3841/** @todo slim this down */
3842DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem,
3843 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
3844{
3845 Assert(cbMem >= 1);
3846 Assert(iSegReg < X86_SREG_COUNT);
3847
3848 /*
3849 * 64-bit mode is simpler.
3850 */
3851 if (IEM_IS_64BIT_CODE(pVCpu))
3852 {
3853 if (iSegReg >= X86_SREG_FS)
3854 {
3855 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3856 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
3857 GCPtrMem += pSel->u64Base;
3858 }
3859
3860 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
3861 return GCPtrMem;
3862 }
3863 /*
3864 * 16-bit and 32-bit segmentation.
3865 */
3866 else
3867 {
3868 Assert(GCPtrMem <= UINT32_MAX);
3869 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3870 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
3871 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
3872 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
3873 if ( fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE) /* data, expand up */
3874 /** @todo explore exactly how the CS stuff works in real mode. See also
3875 * http://www.rcollins.org/Productivity/DescriptorCache.html and
3876 * http://www.rcollins.org/ddj/Aug98/Aug98.html for some insight. */
3877 || (iSegReg == X86_SREG_CS && IEM_IS_REAL_OR_V86_MODE(pVCpu)) ) /* Ignored for CS. */ /** @todo testcase! */
3878 {
3879 /* expand up */
3880 uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
3881 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
3882 && GCPtrLast32 >= (uint32_t)GCPtrMem))
3883 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
3884 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
3885 }
3886 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
3887 {
3888 /* expand down - the upper boundary is defined by the B bit, not G. */
3889 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
3890 if (RT_LIKELY( (uint32_t)GCPtrMem >= pSel->u32Limit
3891 && (pSel->Attr.n.u1DefBig || GCPtrLast32 <= UINT32_C(0xffff))
3892 && GCPtrLast32 >= (uint32_t)GCPtrMem))
3893 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
3894 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
3895 }
3896 else
3897 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
3898 }
3899 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
3900}
3901
3902#endif /* IEM_WITH_SETJMP */
3903
3904/**
3905 * Fakes a long mode stack segment descriptor for SS = 0.
3906 *
3907 * @param pDescSs Where to return the fake stack descriptor.
3908 * @param uDpl The DPL we want.
3909 */
3910DECLINLINE(void) iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl) RT_NOEXCEPT
3911{
3912 pDescSs->Long.au64[0] = 0;
3913 pDescSs->Long.au64[1] = 0;
3914 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
3915 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
3916 pDescSs->Long.Gen.u2Dpl = uDpl;
3917 pDescSs->Long.Gen.u1Present = 1;
3918 pDescSs->Long.Gen.u1Long = 1;
3919}
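
/* Rationale (sketch): long mode permits SS to be the null selector outside
 * CPL 3, but code paths that still consume a descriptor image need something
 * to chew on, hence this synthetic present read/write/accessed data
 * descriptor with the requested DPL. */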
3920
3921
3922/*
3923 * Unmap helpers.
3924 */
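
/* All the helpers below share the same shape: in ring-3 builds with the data
 * TLB enabled, bMapInfo == 0 is the fast path where the access went straight
 * through a TLB mapping and there is nothing to commit or release; all other
 * cases defer to the out-of-line *Safe* variants. */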
3925
3926#ifdef IEM_WITH_SETJMP
3927
3928DECL_INLINE_THROW(void) iemMemCommitAndUnmapRwJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
3929{
3930# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
3931 if (RT_LIKELY(bMapInfo == 0))
3932 return;
3933# endif
3934 iemMemCommitAndUnmapRwSafeJmp(pVCpu, bMapInfo);
3935}
3936
3937
3938DECL_INLINE_THROW(void) iemMemCommitAndUnmapAtJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
3939{
3940# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
3941 if (RT_LIKELY(bMapInfo == 0))
3942 return;
3943# endif
3944 iemMemCommitAndUnmapAtSafeJmp(pVCpu, bMapInfo);
3945}
3946
3947
3948DECL_INLINE_THROW(void) iemMemCommitAndUnmapWoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
3949{
3950# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
3951 if (RT_LIKELY(bMapInfo == 0))
3952 return;
3953# endif
3954 iemMemCommitAndUnmapWoSafeJmp(pVCpu, bMapInfo);
3955}
3956
3957
3958DECL_INLINE_THROW(void) iemMemCommitAndUnmapRoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
3959{
3960# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
3961 if (RT_LIKELY(bMapInfo == 0))
3962 return;
3963# endif
3964 iemMemCommitAndUnmapRoSafeJmp(pVCpu, bMapInfo);
3965}
3966
3967DECLINLINE(void) iemMemRollbackAndUnmapWo(PVMCPUCC pVCpu, uint8_t bMapInfo) RT_NOEXCEPT
3968{
3969# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
3970 if (RT_LIKELY(bMapInfo == 0))
3971 return;
3972# endif
3973 iemMemRollbackAndUnmapWoSafe(pVCpu, bMapInfo);
3974}
3975
3976#endif /* IEM_WITH_SETJMP */
3977
3978
3979/*
3980 * Instantiate R/W inline templates.
3981 */
3982
3983/** @def TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK
3984 * Used to check if an unaligned access is within the page and won't
3985 * trigger an \#AC.
3986 *
3987 * This can be used to deal with misaligned accesses on platforms that are
3988 * sensitive to such, if desired.
3989 */
3990AssertCompile(X86_CR0_AM == X86_EFL_AC);
3991AssertCompile(((3U + 1U) << 16) == X86_CR0_AM);
3992#if 1
3993# define TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(a_pVCpu, a_GCPtrEff, a_TmplMemType) \
3994 ( ((a_GCPtrEff) & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(a_TmplMemType) \
3995 && !( (uint32_t)(a_pVCpu)->cpum.GstCtx.cr0 \
3996 & (a_pVCpu)->cpum.GstCtx.eflags.u \
3997 & ((IEM_GET_CPL((a_pVCpu)) + 1U) << 16) /* IEM_GET_CPL(a_pVCpu) == 3 ? X86_CR0_AM : 0 */ \
3998 & X86_CR0_AM) )
3999#else
4000# define TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(a_pVCpu, a_GCPtrEff, a_TmplMemType) 0
4001#endif
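
/* Worked example of the CPL term above: X86_CR0_AM is bit 18, i.e. 0x40000,
 * and only CPL 3 gives (3 + 1) << 16 == 0x40000, while CPL 0..2 only set
 * bits 16/17. The AND chain thus comes out non-zero only for ring-3 code
 * with CR0.AM and EFLAGS.AC both set, which is exactly when the branch-free
 * check must fail. */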
4002
4003#define TMPL_MEM_WITH_ATOMIC_MAPPING
4004
4005#define TMPL_MEM_TYPE uint8_t
4006#define TMPL_MEM_TYPE_ALIGN 0
4007#define TMPL_MEM_TYPE_SIZE 1
4008#define TMPL_MEM_FN_SUFF U8
4009#define TMPL_MEM_FMT_TYPE "%#04x"
4010#define TMPL_MEM_FMT_DESC "byte"
4011#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
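
/* Each block defines the TMPL_MEM_* parameters and pulls in the template,
 * which evidently #undef's the per-type macros again (they are redefined
 * below without #undef). Note that TMPL_MEM_TYPE_ALIGN is the natural
 * alignment mask (size - 1), except for U64AlignedU128 and U128 which
 * require 16-byte alignment (mask 15). */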
4012
4013#define TMPL_MEM_WITH_STACK
4014
4015#define TMPL_MEM_TYPE uint16_t
4016#define TMPL_MEM_TYPE_ALIGN 1
4017#define TMPL_MEM_TYPE_SIZE 2
4018#define TMPL_MEM_FN_SUFF U16
4019#define TMPL_MEM_FMT_TYPE "%#06x"
4020#define TMPL_MEM_FMT_DESC "word"
4021#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4022
4023#define TMPL_WITH_PUSH_SREG
4024#define TMPL_MEM_TYPE uint32_t
4025#define TMPL_MEM_TYPE_ALIGN 3
4026#define TMPL_MEM_TYPE_SIZE 4
4027#define TMPL_MEM_FN_SUFF U32
4028#define TMPL_MEM_FMT_TYPE "%#010x"
4029#define TMPL_MEM_FMT_DESC "dword"
4030#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4031#undef TMPL_WITH_PUSH_SREG
4032
4033#define TMPL_MEM_TYPE uint64_t
4034#define TMPL_MEM_TYPE_ALIGN 7
4035#define TMPL_MEM_TYPE_SIZE 8
4036#define TMPL_MEM_FN_SUFF U64
4037#define TMPL_MEM_FMT_TYPE "%#018RX64"
4038#define TMPL_MEM_FMT_DESC "qword"
4039#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4040
4041#undef TMPL_MEM_WITH_STACK
4042#undef TMPL_MEM_WITH_ATOMIC_MAPPING
4043
4044#define TMPL_MEM_NO_STORE
4045#define TMPL_MEM_NO_MAPPING
4046#define TMPL_MEM_TYPE uint64_t
4047#define TMPL_MEM_TYPE_ALIGN 15
4048#define TMPL_MEM_TYPE_SIZE 8
4049#define TMPL_MEM_FN_SUFF U64AlignedU128
4050#define TMPL_MEM_FMT_TYPE "%#018RX64"
4051#define TMPL_MEM_FMT_DESC "qword"
4052#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4053
4054#undef TMPL_MEM_NO_STORE
4055#undef TMPL_MEM_NO_MAPPING
4056
4057#define TMPL_MEM_TYPE RTFLOAT80U
4058#define TMPL_MEM_TYPE_ALIGN 7
4059#define TMPL_MEM_TYPE_SIZE 10
4060#define TMPL_MEM_FN_SUFF R80
4061#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
4062#define TMPL_MEM_FMT_DESC "tword"
4063#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4064
4065#define TMPL_MEM_TYPE RTPBCD80U
4066#define TMPL_MEM_TYPE_ALIGN 7 /** @todo RTPBCD80U alignment testcase */
4067#define TMPL_MEM_TYPE_SIZE 10
4068#define TMPL_MEM_FN_SUFF D80
4069#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
4070#define TMPL_MEM_FMT_DESC "tword"
4071#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4072
4073#define TMPL_MEM_WITH_ATOMIC_MAPPING
4074#define TMPL_MEM_TYPE RTUINT128U
4075#define TMPL_MEM_TYPE_ALIGN 15
4076#define TMPL_MEM_TYPE_SIZE 16
4077#define TMPL_MEM_FN_SUFF U128
4078#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
4079#define TMPL_MEM_FMT_DESC "dqword"
4080#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4081#undef TMPL_MEM_WITH_ATOMIC_MAPPING
4082
4083#undef TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK
4084
4085/** @} */
4086
4087
4088#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4089
4090/**
4091 * Gets CR0 fixed-0 bits in VMX operation.
4092 *
4093 * We do this rather than fetching what we report to the guest (in the
4094 * IA32_VMX_CR0_FIXED0 MSR) because real hardware (and we do the same) reports
4095 * the same values regardless of whether the unrestricted-guest feature is available on the CPU.
4096 *
4097 * @returns CR0 fixed-0 bits.
4098 * @param pVCpu The cross context virtual CPU structure.
4099 * @param fVmxNonRootMode Whether the CR0 fixed-0 bits for VMX non-root mode
4100 * must be returned. When @c false, the CR0 fixed-0
4101 * bits for VMX root mode are returned.
4102 *
4103 */
4104DECLINLINE(uint64_t) iemVmxGetCr0Fixed0(PCVMCPUCC pVCpu, bool fVmxNonRootMode) RT_NOEXCEPT
4105{
4106 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
4107
4108 PCVMXMSRS pMsrs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs;
4109 if ( fVmxNonRootMode
4110 && (pMsrs->ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST))
4111 return VMX_V_CR0_FIXED0_UX;
4112 return VMX_V_CR0_FIXED0;
4113}
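
/* For reference (architectural, not verified against the constants here):
 * the fixed-0 CR0 bits in VMX operation are PE, NE and PG; with unrestricted
 * guest only NE remains fixed, which VMX_V_CR0_FIXED0_UX is expected to
 * reflect. */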
4114
4115
4116# ifdef XAPIC_OFF_END /* Requires VBox/apic.h to be included before IEMInline.h. */
4117/**
4118 * Sets virtual-APIC write emulation as pending.
4119 *
4120 * @param pVCpu The cross context virtual CPU structure.
4121 * @param offApic The offset in the virtual-APIC page that was written.
4122 */
4123DECLINLINE(void) iemVmxVirtApicSetPendingWrite(PVMCPUCC pVCpu, uint16_t offApic) RT_NOEXCEPT
4124{
4125 Assert(offApic < XAPIC_OFF_END + 4);
4126
4127 /*
4128 * Record the currently updated APIC offset, as we need this later for figuring
4129 * out whether to perform TPR, EOI or self-IPI virtualization as well
4130 * as for supplying the exit qualification when causing an APIC-write VM-exit.
4131 */
4132 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offApic;
4133
4134 /*
4135 * Flag that we need to perform virtual-APIC write emulation (TPR/PPR/EOI/Self-IPI
4136 * virtualization or APIC-write emulation).
4137 */
4138 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4139 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
4140}
4141# endif /* XAPIC_OFF_END */
4142
4143#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
4144
4145#endif /* !VMM_INCLUDED_SRC_include_IEMInline_h */