VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInline.h@ 100826

Last change on this file since 100826 was 100826, checked in by vboxsync, 16 months ago

VMM/IEM: Started refactoring IEM_MC_MEM_MAP into type and access specific variants so we can more efficiently apply inlined code using the TLB. Converted a bunch of 8-bit accesses in the one-byte opcode map. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 121.9 KB
Line 
1/* $Id: IEMInline.h 100826 2023-08-09 01:57:40Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Inlined Functions.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMInline_h
29#define VMM_INCLUDED_SRC_include_IEMInline_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34
35
36/**
37 * Makes status code adjustments (pass up from I/O and access handler)
38 * as well as maintaining statistics.
39 *
40 * @returns Strict VBox status code to pass up.
41 * @param pVCpu The cross context virtual CPU structure of the calling thread.
42 * @param rcStrict The status from executing an instruction.
43 */
44DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
45{
46 if (rcStrict != VINF_SUCCESS)
47 {
48 /* Deal with the cases that should be treated as VINF_SUCCESS first. */
49 if ( rcStrict == VINF_IEM_YIELD_PENDING_FF
50#ifdef VBOX_WITH_NESTED_HWVIRT_VMX /** @todo r=bird: Why do we need TWO status codes here? */
51 || rcStrict == VINF_VMX_VMEXIT
52#endif
53#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
54 || rcStrict == VINF_SVM_VMEXIT
55#endif
56 )
57 {
58 if (pVCpu->iem.s.rcPassUp == VINF_SUCCESS)
59 rcStrict = VINF_SUCCESS;
60 else
61 {
62 pVCpu->iem.s.cRetPassUpStatus++;
63 rcStrict = pVCpu->iem.s.rcPassUp;
64 }
65 }
66 else if (RT_SUCCESS(rcStrict))
67 {
68 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
69 || rcStrict == VINF_IOM_R3_IOPORT_READ
70 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
71 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
72 || rcStrict == VINF_IOM_R3_MMIO_READ
73 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
74 || rcStrict == VINF_IOM_R3_MMIO_WRITE
75 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
76 || rcStrict == VINF_CPUM_R3_MSR_READ
77 || rcStrict == VINF_CPUM_R3_MSR_WRITE
78 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
79 || rcStrict == VINF_EM_RAW_TO_R3
80 || rcStrict == VINF_EM_TRIPLE_FAULT
81 || rcStrict == VINF_GIM_R3_HYPERCALL
82 /* raw-mode / virt handlers only: */
83 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
84 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
85 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
86 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
87 || rcStrict == VINF_SELM_SYNC_GDT
88 || rcStrict == VINF_CSAM_PENDING_ACTION
89 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
90 /* nested hw.virt codes: */
91 || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
92 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
93 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
94/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
95 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
96 if (rcPassUp == VINF_SUCCESS)
97 pVCpu->iem.s.cRetInfStatuses++;
98 else if ( rcPassUp < VINF_EM_FIRST
99 || rcPassUp > VINF_EM_LAST
100 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
101 {
102 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
103 pVCpu->iem.s.cRetPassUpStatus++;
104 rcStrict = rcPassUp;
105 }
106 else
107 {
108 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
109 pVCpu->iem.s.cRetInfStatuses++;
110 }
111 }
112 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
113 pVCpu->iem.s.cRetAspectNotImplemented++;
114 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
115 pVCpu->iem.s.cRetInstrNotImplemented++;
116 else
117 pVCpu->iem.s.cRetErrStatuses++;
118 }
119 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
120 {
121 pVCpu->iem.s.cRetPassUpStatus++;
122 rcStrict = pVCpu->iem.s.rcPassUp;
123 }
124
125 /* Just clear it here as well. */
126 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
127
128 return rcStrict;
129}
130
131
132/**
133 * Sets the pass up status.
134 *
135 * @returns VINF_SUCCESS.
136 * @param pVCpu The cross context virtual CPU structure of the
137 * calling thread.
138 * @param rcPassUp The pass up status. Must be informational.
139 * VINF_SUCCESS is not allowed.
140 */
141DECLINLINE(int) iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp) RT_NOEXCEPT
142{
143 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
144
145 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
146 if (rcOldPassUp == VINF_SUCCESS)
147 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
148 /* If both are EM scheduling codes, use EM priority rules. */
149 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
150 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
151 {
152 if (rcPassUp < rcOldPassUp)
153 {
154 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
155 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
156 }
157 else
158 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
159 }
160 /* Override EM scheduling with specific status code. */
161 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
162 {
163 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
164 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
165 }
166 /* Don't override specific status code, first come first served. */
167 else
168 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
169 return VINF_SUCCESS;
170}
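/*
 * Example (illustrative, hypothetical usage; not taken from the callers): the EM
 * scheduling rule above treats the numerically lower VINF_EM_* code as the more
 * urgent one, so a later, more urgent code replaces an already recorded, less
 * urgent one, while the reverse order is only logged:
 *
 *     iemSetPassUpStatus(pVCpu, VINF_EM_RAW_TO_R3);   // recorded as rcPassUp
 *     iemSetPassUpStatus(pVCpu, VINF_EM_RESET);       // replaces it if numerically lower
 *
 * The relative ranking of the two codes shown is an assumption for the sake of
 * the example; only the "lower value wins" rule comes from the code above.
 */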
171
172
173/**
174 * Calculates the IEM_F_MODE_X86_32BIT_FLAT flag.
175 *
176 * Checks if CS, SS, DS and ES are all wide open flat 32-bit segments. This will
177 * reject expand down data segments and conforming code segments.
178 *
179 * ASSUMES that the CPU is in 32-bit mode.
180 *
181 * @returns IEM_F_MODE_X86_32BIT_FLAT or zero.
182 * @param pVCpu The cross context virtual CPU structure of the
183 * calling thread.
184 * @sa iemCalc32BitFlatIndicatorEsDs
185 */
186DECL_FORCE_INLINE(uint32_t) iemCalc32BitFlatIndicator(PVMCPUCC pVCpu) RT_NOEXCEPT
187{
188 AssertCompile(X86_SEL_TYPE_DOWN == X86_SEL_TYPE_CONF);
189 return ( ( pVCpu->cpum.GstCtx.es.Attr.u
190 | pVCpu->cpum.GstCtx.cs.Attr.u
191 | pVCpu->cpum.GstCtx.ss.Attr.u
192 | pVCpu->cpum.GstCtx.ds.Attr.u)
193 & (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86DESCATTR_UNUSABLE))
194 == (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P)
195 && ( (pVCpu->cpum.GstCtx.es.u32Limit + 1)
196 | (pVCpu->cpum.GstCtx.cs.u32Limit + 1)
197 | (pVCpu->cpum.GstCtx.ss.u32Limit + 1)
198 | (pVCpu->cpum.GstCtx.ds.u32Limit + 1))
199 == 0
200 && ( pVCpu->cpum.GstCtx.es.u64Base
201 | pVCpu->cpum.GstCtx.cs.u64Base
202 | pVCpu->cpum.GstCtx.ss.u64Base
203 | pVCpu->cpum.GstCtx.ds.u64Base)
204 == 0
205 && !(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS))
206 ? IEM_F_MODE_X86_32BIT_FLAT : 0;
207}
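/*
 * Example (illustrative note): the (u32Limit + 1) terms above rely on a flat
 * 4 GiB segment having a limit of 0xffffffff, so adding 1 wraps the 32-bit
 * value to zero; ORing the wrapped limits of ES, CS, SS and DS is therefore
 * zero exactly when all four limits are 0xffffffff:
 *
 *     uint32_t const uLimit = UINT32_C(0xffffffff);
 *     Assert(uLimit + 1 == 0);    // well-defined unsigned wrap-around
 */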
208
209
210/**
211 * Calculates the IEM_F_MODE_X86_32BIT_FLAT flag, ASSUMING the CS and SS are
212 * flat already.
213 *
214 * This is used by sysenter.
215 *
216 * @returns IEM_F_MODE_X86_32BIT_FLAT or zero.
217 * @param pVCpu The cross context virtual CPU structure of the
218 * calling thread.
219 * @sa iemCalc32BitFlatIndicator
220 */
221DECL_FORCE_INLINE(uint32_t) iemCalc32BitFlatIndicatorEsDs(PVMCPUCC pVCpu) RT_NOEXCEPT
222{
223 AssertCompile(X86_SEL_TYPE_DOWN == X86_SEL_TYPE_CONF);
224 return ( ( pVCpu->cpum.GstCtx.es.Attr.u
225 | pVCpu->cpum.GstCtx.ds.Attr.u)
226 & (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86DESCATTR_UNUSABLE))
227 == (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P)
228 && ( (pVCpu->cpum.GstCtx.es.u32Limit + 1)
229 | (pVCpu->cpum.GstCtx.ds.u32Limit + 1))
230 == 0
231 && ( pVCpu->cpum.GstCtx.es.u64Base
232 | pVCpu->cpum.GstCtx.ds.u64Base)
233 == 0
234 && !(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS))
235 ? IEM_F_MODE_X86_32BIT_FLAT : 0;
236}
237
238
239/**
240 * Calculates the IEM_F_MODE_XXX and CPL flags.
241 *
242 * @returns IEM_F_MODE_XXX
243 * @param pVCpu The cross context virtual CPU structure of the
244 * calling thread.
245 */
246DECL_FORCE_INLINE(uint32_t) iemCalcExecModeAndCplFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
247{
248 /*
249 * We duplicate code from CPUMGetGuestCPL and CPUMIsGuestIn64BitCodeEx
250 * here to try to get this done as efficiently as possible.
251 */
252 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
253
254 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
255 {
256 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
257 {
258 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
259 uint32_t fExec = ((uint32_t)pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl << IEM_F_X86_CPL_SHIFT);
260 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig)
261 {
262 Assert(!pVCpu->cpum.GstCtx.cs.Attr.n.u1Long || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA));
263 fExec |= IEM_F_MODE_X86_32BIT_PROT | iemCalc32BitFlatIndicator(pVCpu);
264 }
265 else if ( pVCpu->cpum.GstCtx.cs.Attr.n.u1Long
266 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA))
267 fExec |= IEM_F_MODE_X86_64BIT;
268 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
269 fExec |= IEM_F_MODE_X86_16BIT_PROT;
270 else
271 fExec |= IEM_F_MODE_X86_16BIT_PROT_PRE_386;
272 return fExec;
273 }
274 return IEM_F_MODE_X86_16BIT_PROT_V86 | (UINT32_C(3) << IEM_F_X86_CPL_SHIFT);
275 }
276
277 /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
278 if (RT_LIKELY(!pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig))
279 {
280 if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
281 return IEM_F_MODE_X86_16BIT;
282 return IEM_F_MODE_X86_16BIT_PRE_386;
283 }
284
285 /* 32-bit unreal mode. */
286 return IEM_F_MODE_X86_32BIT | iemCalc32BitFlatIndicator(pVCpu);
287}
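/*
 * Example (illustrative sketch, assuming the IEM_F_X86_CPL_MASK and
 * IEM_F_X86_CPL_SHIFT definitions used elsewhere in IEM): the CPL is carried
 * in a bitfield of the returned flags and can be recovered with a mask and a
 * shift:
 *
 *     uint32_t const fExec = iemCalcExecModeAndCplFlags(pVCpu);
 *     uint8_t  const uCpl  = (uint8_t)((fExec & IEM_F_X86_CPL_MASK) >> IEM_F_X86_CPL_SHIFT);
 */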
288
289
290/**
291 * Calculates the AMD-V and VT-x related context flags.
292 *
293 * @returns 0 or a combination of IEM_F_X86_CTX_IN_GUEST, IEM_F_X86_CTX_SVM and
294 * IEM_F_X86_CTX_VMX.
295 * @param pVCpu The cross context virtual CPU structure of the
296 * calling thread.
297 */
298DECL_FORCE_INLINE(uint32_t) iemCalcExecHwVirtFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
299{
300 /*
301 * This duplicates code from CPUMIsGuestVmxEnabled, CPUMIsGuestSvmEnabled
302 * and CPUMIsGuestInNestedHwvirtMode to some extent.
303 */
304 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
305
306 AssertCompile(X86_CR4_VMXE != MSR_K6_EFER_SVME);
307 uint64_t const fTmp = (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VMXE)
308 | (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SVME);
309 if (RT_LIKELY(!fTmp))
310 return 0; /* likely */
311
312 if (fTmp & X86_CR4_VMXE)
313 {
314 Assert(pVCpu->cpum.GstCtx.hwvirt.enmHwvirt == CPUMHWVIRT_VMX);
315 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode)
316 return IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST;
317 return IEM_F_X86_CTX_VMX;
318 }
319
320 Assert(pVCpu->cpum.GstCtx.hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
321 if (pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN)
322 return IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST;
323 return IEM_F_X86_CTX_SVM;
324}
325
326
327/**
328 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags.
329 *
330 * @returns IEM_F_BRK_PENDING_XXX or zero.
331 * @param pVCpu The cross context virtual CPU structure of the
332 * calling thread.
333 */
334DECL_FORCE_INLINE(uint32_t) iemCalcExecDbgFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
335{
336 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
337
338 if (RT_LIKELY( !(pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)
339 && pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledHwBreakpoints == 0))
340 return 0;
341 return iemCalcExecDbgFlagsSlow(pVCpu);
342}
343
344/**
345 * Calculates the IEM_F_XXX flags.
346 *
347 * @returns IEM_F_XXX combination matching the current CPU state.
348 * @param pVCpu The cross context virtual CPU structure of the
349 * calling thread.
350 */
351DECL_FORCE_INLINE(uint32_t) iemCalcExecFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
352{
353 return iemCalcExecModeAndCplFlags(pVCpu)
354 | iemCalcExecHwVirtFlags(pVCpu)
355 /* SMM is not yet implemented */
356 | iemCalcExecDbgFlags(pVCpu)
357 ;
358}
359
360
361/**
362 * Re-calculates the MODE and CPL parts of IEMCPU::fExec.
363 *
364 * @param pVCpu The cross context virtual CPU structure of the
365 * calling thread.
366 */
367DECL_FORCE_INLINE(void) iemRecalcExecModeAndCplFlags(PVMCPUCC pVCpu)
368{
369 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
370 | iemCalcExecModeAndCplFlags(pVCpu);
371}
372
373
374/**
375 * Re-calculates the IEM_F_PENDING_BRK_MASK part of IEMCPU::fExec.
376 *
377 * @param pVCpu The cross context virtual CPU structure of the
378 * calling thread.
379 */
380DECL_FORCE_INLINE(void) iemRecalcExecDbgFlags(PVMCPUCC pVCpu)
381{
382 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_PENDING_BRK_MASK)
383 | iemCalcExecDbgFlags(pVCpu);
384}
385
386
387#ifndef IEM_WITH_OPAQUE_DECODER_STATE
388
389# if defined(VBOX_INCLUDED_vmm_dbgf_h) || defined(DOXYGEN_RUNNING) /* dbgf.ro.cEnabledHwBreakpoints */
390/**
391 * Initializes the execution state.
392 *
393 * @param pVCpu The cross context virtual CPU structure of the
394 * calling thread.
395 * @param fExecOpts Optional execution flags:
396 * - IEM_F_BYPASS_HANDLERS
397 * - IEM_F_X86_DISREGARD_LOCK
398 *
399 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
400 * side-effects in strict builds.
401 */
402DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
403{
404 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
405 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
406 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
407 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
408 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
409 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
410 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
411 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
412 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
413 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
414
415 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
416 pVCpu->iem.s.fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
417 pVCpu->iem.s.cActiveMappings = 0;
418 pVCpu->iem.s.iNextMapping = 0;
419
420# ifdef VBOX_STRICT
421 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
422 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
423 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
424 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
425 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
426 pVCpu->iem.s.uRexReg = 127;
427 pVCpu->iem.s.uRexB = 127;
428 pVCpu->iem.s.offModRm = 127;
429 pVCpu->iem.s.uRexIndex = 127;
430 pVCpu->iem.s.iEffSeg = 127;
431 pVCpu->iem.s.idxPrefix = 127;
432 pVCpu->iem.s.uVex3rdReg = 127;
433 pVCpu->iem.s.uVexLength = 127;
434 pVCpu->iem.s.fEvexStuff = 127;
435 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
436# ifdef IEM_WITH_CODE_TLB
437 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
438 pVCpu->iem.s.pbInstrBuf = NULL;
439 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
440 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
441 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
442 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
443# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
444 pVCpu->iem.s.offOpcode = 127;
445# endif
446# else
447 pVCpu->iem.s.offOpcode = 127;
448 pVCpu->iem.s.cbOpcode = 127;
449# endif
450# endif /* VBOX_STRICT */
451}
452# endif /* VBOX_INCLUDED_vmm_dbgf_h */
453
454
455# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
456/**
457 * Performs a minimal reinitialization of the execution state.
458 *
459 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
460 * 'world-switch' type operations on the CPU. Currently only nested
461 * hardware-virtualization uses it.
462 *
463 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
464 * @param cbInstr The instruction length (for flushing).
465 */
466DECLINLINE(void) iemReInitExec(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
467{
468 pVCpu->iem.s.fExec = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
469 iemOpcodeFlushHeavy(pVCpu, cbInstr);
470}
471# endif
472
473
474/**
475 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
476 *
477 * @param pVCpu The cross context virtual CPU structure of the
478 * calling thread.
479 */
480DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu) RT_NOEXCEPT
481{
482 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
483# ifdef VBOX_STRICT
484# ifdef IEM_WITH_CODE_TLB
485 NOREF(pVCpu);
486# else
487 pVCpu->iem.s.cbOpcode = 0;
488# endif
489# else
490 NOREF(pVCpu);
491# endif
492}
493
494
495/**
496 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
497 *
498 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
499 *
500 * @returns Fiddled strict VBox status code, ready to return to the non-IEM caller.
501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
502 * @param rcStrict The status code to fiddle.
503 */
504DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
505{
506 iemUninitExec(pVCpu);
507 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
508}
509
510
511/**
512 * Macro used by the IEMExec* method to check the given instruction length.
513 *
514 * Will return on failure!
515 *
516 * @param a_cbInstr The given instruction length.
517 * @param a_cbMin The minimum length.
518 */
519# define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
520 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
521 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
522
523
524# ifndef IEM_WITH_SETJMP
525
526/**
527 * Fetches the first opcode byte.
528 *
529 * @returns Strict VBox status code.
530 * @param pVCpu The cross context virtual CPU structure of the
531 * calling thread.
532 * @param pu8 Where to return the opcode byte.
533 */
534DECLINLINE(VBOXSTRICTRC) iemOpcodeGetFirstU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
535{
536 /*
537 * Check for hardware instruction breakpoints.
538 */
539 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_INSTR)))
540 { /* likely */ }
541 else
542 {
543 VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
544 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
545 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
546 { /* likely */ }
547 else if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
548 return iemRaiseDebugException(pVCpu);
549 else
550 return rcStrict;
551 }
552
553 /*
554 * Fetch the first opcode byte.
555 */
556 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
557 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
558 {
559 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
560 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
561 return VINF_SUCCESS;
562 }
563 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
564}
565
566# else /* IEM_WITH_SETJMP */
567
568/**
569 * Fetches the first opcode byte, longjmp on error.
570 *
571 * @returns The opcode byte.
572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
573 */
574DECL_INLINE_THROW(uint8_t) iemOpcodeGetFirstU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
575{
576 /*
577 * Check for hardware instruction breakpoints.
578 */
579 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_INSTR)))
580 { /* likely */ }
581 else
582 {
583 VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
584 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
585 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
586 { /* likely */ }
587 else
588 {
589 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
590 rcStrict = iemRaiseDebugException(pVCpu);
591 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
592 }
593 }
594
595 /*
596 * Fetch the first opcode byte.
597 */
598# ifdef IEM_WITH_CODE_TLB
599 uint8_t bRet;
600 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
601 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
602 if (RT_LIKELY( pbBuf != NULL
603 && offBuf < pVCpu->iem.s.cbInstrBuf))
604 {
605 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
606 bRet = pbBuf[offBuf];
607 }
608 else
609 bRet = iemOpcodeGetNextU8SlowJmp(pVCpu);
610# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
611 Assert(pVCpu->iem.s.offOpcode == 0);
612 pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet;
613# endif
614 return bRet;
615
616# else /* !IEM_WITH_CODE_TLB */
617 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
618 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
619 {
620 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
621 return pVCpu->iem.s.abOpcode[offOpcode];
622 }
623 return iemOpcodeGetNextU8SlowJmp(pVCpu);
624# endif
625}
626
627# endif /* IEM_WITH_SETJMP */
628
629/**
630 * Fetches the first opcode byte, returns/throws automatically on failure.
631 *
632 * @param a_pu8 Where to return the opcode byte.
633 * @remark Implicitly references pVCpu.
634 */
635# ifndef IEM_WITH_SETJMP
636# define IEM_OPCODE_GET_FIRST_U8(a_pu8) \
637 do \
638 { \
639 VBOXSTRICTRC rcStrict2 = iemOpcodeGetFirstU8(pVCpu, (a_pu8)); \
640 if (rcStrict2 == VINF_SUCCESS) \
641 { /* likely */ } \
642 else \
643 return rcStrict2; \
644 } while (0)
645# else
646# define IEM_OPCODE_GET_FIRST_U8(a_pu8) (*(a_pu8) = iemOpcodeGetFirstU8Jmp(pVCpu))
647# endif /* IEM_WITH_SETJMP */
648
649
650# ifndef IEM_WITH_SETJMP
651
652/**
653 * Fetches the next opcode byte.
654 *
655 * @returns Strict VBox status code.
656 * @param pVCpu The cross context virtual CPU structure of the
657 * calling thread.
658 * @param pu8 Where to return the opcode byte.
659 */
660DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
661{
662 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
663 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
664 {
665 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
666 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
667 return VINF_SUCCESS;
668 }
669 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
670}
671
672# else /* IEM_WITH_SETJMP */
673
674/**
675 * Fetches the next opcode byte, longjmp on error.
676 *
677 * @returns The opcode byte.
678 * @param pVCpu The cross context virtual CPU structure of the calling thread.
679 */
680DECL_INLINE_THROW(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
681{
682# ifdef IEM_WITH_CODE_TLB
683 uint8_t bRet;
684 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
685 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
686 if (RT_LIKELY( pbBuf != NULL
687 && offBuf < pVCpu->iem.s.cbInstrBuf))
688 {
689 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
690 bRet = pbBuf[offBuf];
691 }
692 else
693 bRet = iemOpcodeGetNextU8SlowJmp(pVCpu);
694# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
695 Assert(pVCpu->iem.s.offOpcode < sizeof(pVCpu->iem.s.abOpcode));
696 pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet;
697# endif
698 return bRet;
699
700# else /* !IEM_WITH_CODE_TLB */
701 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
702 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
703 {
704 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
705 return pVCpu->iem.s.abOpcode[offOpcode];
706 }
707 return iemOpcodeGetNextU8SlowJmp(pVCpu);
708# endif
709}
710
711# endif /* IEM_WITH_SETJMP */
712
713/**
714 * Fetches the next opcode byte, returns automatically on failure.
715 *
716 * @param a_pu8 Where to return the opcode byte.
717 * @remark Implicitly references pVCpu.
718 */
719# ifndef IEM_WITH_SETJMP
720# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
721 do \
722 { \
723 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
724 if (rcStrict2 == VINF_SUCCESS) \
725 { /* likely */ } \
726 else \
727 return rcStrict2; \
728 } while (0)
729# else
730# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
731# endif /* IEM_WITH_SETJMP */
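/*
 * Example (illustrative, hypothetical usage): instruction decoder functions
 * fetch bytes through this wrapper so the same source works in both the
 * status-code build and the setjmp/longjmp build, e.g. when reading a ModR/M
 * byte:
 *
 *     uint8_t bRm;
 *     IEM_OPCODE_GET_NEXT_U8(&bRm);   // returns or longjmps on fetch failure
 */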
732
733
734# ifndef IEM_WITH_SETJMP
735/**
736 * Fetches the next signed byte from the opcode stream.
737 *
738 * @returns Strict VBox status code.
739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
740 * @param pi8 Where to return the signed byte.
741 */
742DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8) RT_NOEXCEPT
743{
744 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
745}
746# endif /* !IEM_WITH_SETJMP */
747
748
749/**
750 * Fetches the next signed byte from the opcode stream, returning automatically
751 * on failure.
752 *
753 * @param a_pi8 Where to return the signed byte.
754 * @remark Implicitly references pVCpu.
755 */
756# ifndef IEM_WITH_SETJMP
757# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
758 do \
759 { \
760 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
761 if (rcStrict2 != VINF_SUCCESS) \
762 return rcStrict2; \
763 } while (0)
764# else /* IEM_WITH_SETJMP */
765# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
766
767# endif /* IEM_WITH_SETJMP */
768
769
770# ifndef IEM_WITH_SETJMP
771/**
772 * Fetches the next signed byte from the opcode stream, extending it to
773 * unsigned 16-bit.
774 *
775 * @returns Strict VBox status code.
776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
777 * @param pu16 Where to return the unsigned word.
778 */
779DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
780{
781 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
782 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
783 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
784
785 *pu16 = (uint16_t)(int16_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
786 pVCpu->iem.s.offOpcode = offOpcode + 1;
787 return VINF_SUCCESS;
788}
789# endif /* !IEM_WITH_SETJMP */
790
791/**
792 * Fetches the next signed byte from the opcode stream, sign-extending it to
793 * a word, returning automatically on failure.
794 *
795 * @param a_pu16 Where to return the word.
796 * @remark Implicitly references pVCpu.
797 */
798# ifndef IEM_WITH_SETJMP
799# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
800 do \
801 { \
802 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
803 if (rcStrict2 != VINF_SUCCESS) \
804 return rcStrict2; \
805 } while (0)
806# else
807# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (uint16_t)(int16_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
808# endif
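/*
 * Example (illustrative note): the (uint16_t)(int16_t)(int8_t) cast chain used
 * above sign-extends the byte before widening it, e.g. for an opcode byte of
 * 0xfe:
 *
 *     (int8_t)0xfe                      == -2
 *     (uint16_t)(int16_t)(int8_t)0xfe   == 0xfffe
 */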
809
810# ifndef IEM_WITH_SETJMP
811/**
812 * Fetches the next signed byte from the opcode stream, extending it to
813 * unsigned 32-bit.
814 *
815 * @returns Strict VBox status code.
816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
817 * @param pu32 Where to return the unsigned dword.
818 */
819DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
820{
821 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
822 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
823 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
824
825 *pu32 = (uint32_t)(int32_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
826 pVCpu->iem.s.offOpcode = offOpcode + 1;
827 return VINF_SUCCESS;
828}
829# endif /* !IEM_WITH_SETJMP */
830
831/**
832 * Fetches the next signed byte from the opcode stream, sign-extending it to
833 * a double word, returning automatically on failure.
834 *
835 * @param a_pu32 Where to return the double word.
836 * @remark Implicitly references pVCpu.
837 */
838# ifndef IEM_WITH_SETJMP
839# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
840 do \
841 { \
842 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
843 if (rcStrict2 != VINF_SUCCESS) \
844 return rcStrict2; \
845 } while (0)
846# else
847# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (uint32_t)(int32_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
848# endif
849
850
851# ifndef IEM_WITH_SETJMP
852/**
853 * Fetches the next signed byte from the opcode stream, extending it to
854 * unsigned 64-bit.
855 *
856 * @returns Strict VBox status code.
857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
858 * @param pu64 Where to return the unsigned qword.
859 */
860DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
861{
862 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
863 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
864 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
865
866 *pu64 = (uint64_t)(int64_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
867 pVCpu->iem.s.offOpcode = offOpcode + 1;
868 return VINF_SUCCESS;
869}
870# endif /* !IEM_WITH_SETJMP */
871
872/**
873 * Fetches the next signed byte from the opcode stream, sign-extending it to
874 * a quad word, returning automatically on failure.
875 *
876 * @param a_pu64 Where to return the quad word.
877 * @remark Implicitly references pVCpu.
878 */
879# ifndef IEM_WITH_SETJMP
880# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
881 do \
882 { \
883 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
884 if (rcStrict2 != VINF_SUCCESS) \
885 return rcStrict2; \
886 } while (0)
887# else
888# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
889# endif
890
891
892# ifndef IEM_WITH_SETJMP
893
894/**
895 * Fetches the next opcode word.
896 *
897 * @returns Strict VBox status code.
898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
899 * @param pu16 Where to return the opcode word.
900 */
901DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
902{
903 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
904 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
905 {
906 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
907# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
908 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
909# else
910 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
911# endif
912 return VINF_SUCCESS;
913 }
914 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
915}
916
917# else /* IEM_WITH_SETJMP */
918
919/**
920 * Fetches the next opcode word, longjmp on error.
921 *
922 * @returns The opcode word.
923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
924 */
925DECL_INLINE_THROW(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
926{
927# ifdef IEM_WITH_CODE_TLB
928 uint16_t u16Ret;
929 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
930 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
931 if (RT_LIKELY( pbBuf != NULL
932 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
933 {
934 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
935# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
936 u16Ret = *(uint16_t const *)&pbBuf[offBuf];
937# else
938 u16Ret = RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
939# endif
940 }
941 else
942 u16Ret = iemOpcodeGetNextU16SlowJmp(pVCpu);
943
944# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
945 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
946 Assert(offOpcode + 1 < sizeof(pVCpu->iem.s.abOpcode));
947# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
948 *(uint16_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u16Ret;
949# else
950 pVCpu->iem.s.abOpcode[offOpcode] = RT_LO_U8(u16Ret);
951 pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_HI_U8(u16Ret);
952# endif
953 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)2;
954# endif
955
956 return u16Ret;
957
958# else /* !IEM_WITH_CODE_TLB */
959 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
960 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
961 {
962 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
963# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
964 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
965# else
966 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
967# endif
968 }
969 return iemOpcodeGetNextU16SlowJmp(pVCpu);
970# endif /* !IEM_WITH_CODE_TLB */
971}
972
973# endif /* IEM_WITH_SETJMP */
974
975/**
976 * Fetches the next opcode word, returns automatically on failure.
977 *
978 * @param a_pu16 Where to return the opcode word.
979 * @remark Implicitly references pVCpu.
980 */
981# ifndef IEM_WITH_SETJMP
982# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
983 do \
984 { \
985 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
986 if (rcStrict2 != VINF_SUCCESS) \
987 return rcStrict2; \
988 } while (0)
989# else
990# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
991# endif
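/*
 * Example (illustrative note): opcode words are assembled little-endian from
 * the instruction stream, so for the byte sequence 0x34 0x12 the fetchers
 * above produce:
 *
 *     RT_MAKE_U16(0x34, 0x12) == 0x1234
 */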
992
993# ifndef IEM_WITH_SETJMP
994/**
995 * Fetches the next opcode word, zero extending it to a double word.
996 *
997 * @returns Strict VBox status code.
998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
999 * @param pu32 Where to return the opcode double word.
1000 */
1001DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1002{
1003 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1004 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
1005 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
1006
1007 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1008 pVCpu->iem.s.offOpcode = offOpcode + 2;
1009 return VINF_SUCCESS;
1010}
1011# endif /* !IEM_WITH_SETJMP */
1012
1013/**
1014 * Fetches the next opcode word and zero extends it to a double word, returns
1015 * automatically on failure.
1016 *
1017 * @param a_pu32 Where to return the opcode double word.
1018 * @remark Implicitly references pVCpu.
1019 */
1020# ifndef IEM_WITH_SETJMP
1021# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1022 do \
1023 { \
1024 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
1025 if (rcStrict2 != VINF_SUCCESS) \
1026 return rcStrict2; \
1027 } while (0)
1028# else
1029# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
1030# endif
1031
1032# ifndef IEM_WITH_SETJMP
1033/**
1034 * Fetches the next opcode word, zero extending it to a quad word.
1035 *
1036 * @returns Strict VBox status code.
1037 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1038 * @param pu64 Where to return the opcode quad word.
1039 */
1040DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1041{
1042 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1043 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
1044 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
1045
1046 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1047 pVCpu->iem.s.offOpcode = offOpcode + 2;
1048 return VINF_SUCCESS;
1049}
1050# endif /* !IEM_WITH_SETJMP */
1051
1052/**
1053 * Fetches the next opcode word and zero extends it to a quad word, returns
1054 * automatically on failure.
1055 *
1056 * @param a_pu64 Where to return the opcode quad word.
1057 * @remark Implicitly references pVCpu.
1058 */
1059# ifndef IEM_WITH_SETJMP
1060# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1061 do \
1062 { \
1063 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
1064 if (rcStrict2 != VINF_SUCCESS) \
1065 return rcStrict2; \
1066 } while (0)
1067# else
1068# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
1069# endif
1070
1071
1072# ifndef IEM_WITH_SETJMP
1073/**
1074 * Fetches the next signed word from the opcode stream.
1075 *
1076 * @returns Strict VBox status code.
1077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1078 * @param pi16 Where to return the signed word.
1079 */
1080DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16) RT_NOEXCEPT
1081{
1082 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
1083}
1084# endif /* !IEM_WITH_SETJMP */
1085
1086
1087/**
1088 * Fetches the next signed word from the opcode stream, returning automatically
1089 * on failure.
1090 *
1091 * @param a_pi16 Where to return the signed word.
1092 * @remark Implicitly references pVCpu.
1093 */
1094# ifndef IEM_WITH_SETJMP
1095# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1096 do \
1097 { \
1098 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
1099 if (rcStrict2 != VINF_SUCCESS) \
1100 return rcStrict2; \
1101 } while (0)
1102# else
1103# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
1104# endif
1105
1106# ifndef IEM_WITH_SETJMP
1107
1108/**
1109 * Fetches the next opcode dword.
1110 *
1111 * @returns Strict VBox status code.
1112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1113 * @param pu32 Where to return the opcode double word.
1114 */
1115DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1116{
1117 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1118 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
1119 {
1120 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
1121# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1122 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1123# else
1124 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1125 pVCpu->iem.s.abOpcode[offOpcode + 1],
1126 pVCpu->iem.s.abOpcode[offOpcode + 2],
1127 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1128# endif
1129 return VINF_SUCCESS;
1130 }
1131 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
1132}
1133
1134# else /* IEM_WITH_SETJMP */
1135
1136/**
1137 * Fetches the next opcode dword, longjmp on error.
1138 *
1139 * @returns The opcode dword.
1140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1141 */
1142DECL_INLINE_THROW(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1143{
1144# ifdef IEM_WITH_CODE_TLB
1145 uint32_t u32Ret;
1146 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1147 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1148 if (RT_LIKELY( pbBuf != NULL
1149 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
1150 {
1151 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
1152# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1153 u32Ret = *(uint32_t const *)&pbBuf[offBuf];
1154# else
1155 u32Ret = RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
1156 pbBuf[offBuf + 1],
1157 pbBuf[offBuf + 2],
1158 pbBuf[offBuf + 3]);
1159# endif
1160 }
1161 else
1162 u32Ret = iemOpcodeGetNextU32SlowJmp(pVCpu);
1163
1164# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
1165 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1166 Assert(offOpcode + 3 < sizeof(pVCpu->iem.s.abOpcode));
1167# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1168 *(uint32_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u32Ret;
1169# else
1170 pVCpu->iem.s.abOpcode[offOpcode] = RT_BYTE1(u32Ret);
1171 pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_BYTE2(u32Ret);
1172 pVCpu->iem.s.abOpcode[offOpcode + 2] = RT_BYTE3(u32Ret);
1173 pVCpu->iem.s.abOpcode[offOpcode + 3] = RT_BYTE4(u32Ret);
1174# endif
1175 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)4;
1176# endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */
1177
1178 return u32Ret;
1179
1180# else /* !IEM_WITH_CODE_TLB */
1181 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1182 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
1183 {
1184 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
1185# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1186 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1187# else
1188 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1189 pVCpu->iem.s.abOpcode[offOpcode + 1],
1190 pVCpu->iem.s.abOpcode[offOpcode + 2],
1191 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1192# endif
1193 }
1194 return iemOpcodeGetNextU32SlowJmp(pVCpu);
1195# endif
1196}
1197
1198# endif /* IEM_WITH_SETJMP */
1199
1200/**
1201 * Fetches the next opcode dword, returns automatically on failure.
1202 *
1203 * @param a_pu32 Where to return the opcode dword.
1204 * @remark Implicitly references pVCpu.
1205 */
1206# ifndef IEM_WITH_SETJMP
1207# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1208 do \
1209 { \
1210 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
1211 if (rcStrict2 != VINF_SUCCESS) \
1212 return rcStrict2; \
1213 } while (0)
1214# else
1215# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
1216# endif
1217
1218# ifndef IEM_WITH_SETJMP
1219/**
1220 * Fetches the next opcode dword, zero extending it to a quad word.
1221 *
1222 * @returns Strict VBox status code.
1223 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1224 * @param pu64 Where to return the opcode quad word.
1225 */
1226DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1227{
1228 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1229 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
1230 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
1231
1232 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1233 pVCpu->iem.s.abOpcode[offOpcode + 1],
1234 pVCpu->iem.s.abOpcode[offOpcode + 2],
1235 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1236 pVCpu->iem.s.offOpcode = offOpcode + 4;
1237 return VINF_SUCCESS;
1238}
1239# endif /* !IEM_WITH_SETJMP */
1240
1241/**
1242 * Fetches the next opcode dword and zero extends it to a quad word, returns
1243 * automatically on failure.
1244 *
1245 * @param a_pu64 Where to return the opcode quad word.
1246 * @remark Implicitly references pVCpu.
1247 */
1248# ifndef IEM_WITH_SETJMP
1249# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1250 do \
1251 { \
1252 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
1253 if (rcStrict2 != VINF_SUCCESS) \
1254 return rcStrict2; \
1255 } while (0)
1256# else
1257# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
1258# endif
1259
1260
1261# ifndef IEM_WITH_SETJMP
1262/**
1263 * Fetches the next signed double word from the opcode stream.
1264 *
1265 * @returns Strict VBox status code.
1266 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1267 * @param pi32 Where to return the signed double word.
1268 */
1269DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32) RT_NOEXCEPT
1270{
1271 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
1272}
1273# endif
1274
1275/**
1276 * Fetches the next signed double word from the opcode stream, returning
1277 * automatically on failure.
1278 *
1279 * @param a_pi32 Where to return the signed double word.
1280 * @remark Implicitly references pVCpu.
1281 */
1282# ifndef IEM_WITH_SETJMP
1283# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1284 do \
1285 { \
1286 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
1287 if (rcStrict2 != VINF_SUCCESS) \
1288 return rcStrict2; \
1289 } while (0)
1290# else
1291# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
1292# endif
1293
1294# ifndef IEM_WITH_SETJMP
1295/**
1296 * Fetches the next opcode dword, sign extending it into a quad word.
1297 *
1298 * @returns Strict VBox status code.
1299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1300 * @param pu64 Where to return the opcode quad word.
1301 */
1302DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1303{
1304 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1305 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
1306 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
1307
1308 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1309 pVCpu->iem.s.abOpcode[offOpcode + 1],
1310 pVCpu->iem.s.abOpcode[offOpcode + 2],
1311 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1312 *pu64 = (uint64_t)(int64_t)i32;
1313 pVCpu->iem.s.offOpcode = offOpcode + 4;
1314 return VINF_SUCCESS;
1315}
1316# endif /* !IEM_WITH_SETJMP */
1317
1318/**
1319 * Fetches the next opcode double word and sign extends it to a quad word,
1320 * returns automatically on failure.
1321 *
1322 * @param a_pu64 Where to return the opcode quad word.
1323 * @remark Implicitly references pVCpu.
1324 */
1325# ifndef IEM_WITH_SETJMP
1326# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1327 do \
1328 { \
1329 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
1330 if (rcStrict2 != VINF_SUCCESS) \
1331 return rcStrict2; \
1332 } while (0)
1333# else
1334# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
1335# endif
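/*
 * Example (illustrative note): this fetcher matches the way 32-bit immediates
 * and displacements are sign-extended to 64 bits in long mode, e.g. for the
 * dword 0xfffffff0:
 *
 *     (int32_t)0xfffffff0                     == -16
 *     (uint64_t)(int64_t)(int32_t)0xfffffff0  == 0xfffffffffffffff0
 */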
1336
1337# ifndef IEM_WITH_SETJMP
1338
1339/**
1340 * Fetches the next opcode qword.
1341 *
1342 * @returns Strict VBox status code.
1343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1344 * @param pu64 Where to return the opcode qword.
1345 */
1346DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1347{
1348 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1349 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
1350 {
1351# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1352 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1353# else
1354 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1355 pVCpu->iem.s.abOpcode[offOpcode + 1],
1356 pVCpu->iem.s.abOpcode[offOpcode + 2],
1357 pVCpu->iem.s.abOpcode[offOpcode + 3],
1358 pVCpu->iem.s.abOpcode[offOpcode + 4],
1359 pVCpu->iem.s.abOpcode[offOpcode + 5],
1360 pVCpu->iem.s.abOpcode[offOpcode + 6],
1361 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1362# endif
1363 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
1364 return VINF_SUCCESS;
1365 }
1366 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
1367}
1368
1369# else /* IEM_WITH_SETJMP */
1370
1371/**
1372 * Fetches the next opcode qword, longjmp on error.
1373 *
1374 * @returns The opcode qword.
1375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1376 */
1377DECL_INLINE_THROW(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1378{
1379# ifdef IEM_WITH_CODE_TLB
1380 uint64_t u64Ret;
1381 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1382 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1383 if (RT_LIKELY( pbBuf != NULL
1384 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
1385 {
1386 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
1387# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1388 u64Ret = *(uint64_t const *)&pbBuf[offBuf];
1389# else
1390 u64Ret = RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
1391 pbBuf[offBuf + 1],
1392 pbBuf[offBuf + 2],
1393 pbBuf[offBuf + 3],
1394 pbBuf[offBuf + 4],
1395 pbBuf[offBuf + 5],
1396 pbBuf[offBuf + 6],
1397 pbBuf[offBuf + 7]);
1398# endif
1399 }
1400 else
1401 u64Ret = iemOpcodeGetNextU64SlowJmp(pVCpu);
1402
1403# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
1404 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1405 Assert(offOpcode + 7 < sizeof(pVCpu->iem.s.abOpcode));
1406# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1407 *(uint64_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u64Ret;
1408# else
1409 pVCpu->iem.s.abOpcode[offOpcode] = RT_BYTE1(u64Ret);
1410 pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_BYTE2(u64Ret);
1411 pVCpu->iem.s.abOpcode[offOpcode + 2] = RT_BYTE3(u64Ret);
1412 pVCpu->iem.s.abOpcode[offOpcode + 3] = RT_BYTE4(u64Ret);
1413 pVCpu->iem.s.abOpcode[offOpcode + 4] = RT_BYTE5(u64Ret);
1414 pVCpu->iem.s.abOpcode[offOpcode + 5] = RT_BYTE6(u64Ret);
1415 pVCpu->iem.s.abOpcode[offOpcode + 6] = RT_BYTE7(u64Ret);
1416 pVCpu->iem.s.abOpcode[offOpcode + 7] = RT_BYTE8(u64Ret);
1417# endif
1418 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)8;
1419# endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */
1420
1421 return u64Ret;
1422
1423# else /* !IEM_WITH_CODE_TLB */
1424 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1425 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
1426 {
1427 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
1428# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1429 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1430# else
1431 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1432 pVCpu->iem.s.abOpcode[offOpcode + 1],
1433 pVCpu->iem.s.abOpcode[offOpcode + 2],
1434 pVCpu->iem.s.abOpcode[offOpcode + 3],
1435 pVCpu->iem.s.abOpcode[offOpcode + 4],
1436 pVCpu->iem.s.abOpcode[offOpcode + 5],
1437 pVCpu->iem.s.abOpcode[offOpcode + 6],
1438 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1439# endif
1440 }
1441 return iemOpcodeGetNextU64SlowJmp(pVCpu);
1442# endif /* !IEM_WITH_CODE_TLB */
1443}
1444
1445# endif /* IEM_WITH_SETJMP */
1446
1447/**
1448 * Fetches the next opcode quad word, returns automatically on failure.
1449 *
1450 * @param a_pu64 Where to return the opcode quad word.
1451 * @remark Implicitly references pVCpu.
1452 */
1453# ifndef IEM_WITH_SETJMP
1454# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1455 do \
1456 { \
1457 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
1458 if (rcStrict2 != VINF_SUCCESS) \
1459 return rcStrict2; \
1460 } while (0)
1461# else
1462# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
1463# endif
1464
1465/**
1466 * For fetching the opcode bytes for a ModR/M effective address, but throwing
1467 * away the result.
1468 *
1469 * This is used when decoding undefined opcodes and such where we want to avoid
1470 * unnecessary MC blocks.
1471 *
1472 * @note The recompiler code overrides this one so iemOpHlpCalcRmEffAddrJmpEx is
1473 * used instead. At least for now...
1474 */
1475# ifndef IEM_WITH_SETJMP
1476# define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
1477 RTGCPTR GCPtrEff; \
1478 VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff); \
1479 if (rcStrict != VINF_SUCCESS) \
1480 return rcStrict; \
1481 } while (0)
1482# else
1483# define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
1484 (void)iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 0); \
1485 } while (0)
1486# endif
1487
1488#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */
1489
1490
1491/** @name Misc Worker Functions.
1492 * @{
1493 */
1494
1495/**
1496 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1497 * not (kind of obsolete now).
1498 *
1499 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1500 */
1501#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
1502
1503/**
1504 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
1505 *
1506 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1507 * @param a_fEfl The new EFLAGS.
1508 */
1509#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
1510
1511
1512/**
1513 * Loads a NULL data selector into a selector register, both the hidden and
1514 * visible parts, in protected mode.
1515 *
1516 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1517 * @param pSReg Pointer to the segment register.
1518 * @param uRpl The RPL.
1519 */
1520DECLINLINE(void) iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl) RT_NOEXCEPT
1521{
1522 /** @todo Testcase: write a testcase checking what happens when loading a NULL
1523 * data selector in protected mode. */
1524 pSReg->Sel = uRpl;
1525 pSReg->ValidSel = uRpl;
1526 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1527 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1528 {
1529 /* VT-x (Intel 3960x) observed doing something like this. */
1530 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (IEM_GET_CPL(pVCpu) << X86DESCATTR_DPL_SHIFT);
1531 pSReg->u32Limit = UINT32_MAX;
1532 pSReg->u64Base = 0;
1533 }
1534 else
1535 {
1536 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
1537 pSReg->u32Limit = 0;
1538 pSReg->u64Base = 0;
1539 }
1540}
1541
1542/** @} */
1543
1544
1545/*
1546 *
1547 * Helper routines.
1548 * Helper routines.
1549 * Helper routines.
1550 *
1551 */
1552
1553#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1554
1555/**
1556 * Recalculates the effective operand size.
1557 *
1558 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1559 */
1560DECLINLINE(void) iemRecalEffOpSize(PVMCPUCC pVCpu) RT_NOEXCEPT
1561{
1562 switch (IEM_GET_CPU_MODE(pVCpu))
1563 {
1564 case IEMMODE_16BIT:
1565 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
1566 break;
1567 case IEMMODE_32BIT:
1568 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
1569 break;
1570 case IEMMODE_64BIT:
1571 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
1572 {
1573 case 0:
1574 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
1575 break;
1576 case IEM_OP_PRF_SIZE_OP:
1577 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1578 break;
1579 case IEM_OP_PRF_SIZE_REX_W:
1580 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
1581 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1582 break;
1583 }
1584 break;
1585 default:
1586 AssertFailed();
1587 }
1588}
1589
1590
1591/**
1592 * Sets the default operand size to 64-bit and recalculates the effective
1593 * operand size.
1594 *
1595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1596 */
1597DECLINLINE(void) iemRecalEffOpSize64Default(PVMCPUCC pVCpu) RT_NOEXCEPT
1598{
1599 Assert(IEM_IS_64BIT_CODE(pVCpu));
1600 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
1601 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
1602 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1603 else
1604 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1605}
1606
1607
1608/**
1609 * Sets the default operand size to 64-bit and recalculates the effective
1610 * operand size, with Intel ignoring any operand size prefix (AMD respects it).
1611 *
1612 * This is for the relative jumps.
1613 *
1614 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1615 */
1616DECLINLINE(void) iemRecalEffOpSize64DefaultAndIntelIgnoresOpSizePrefix(PVMCPUCC pVCpu) RT_NOEXCEPT
1617{
1618 Assert(IEM_IS_64BIT_CODE(pVCpu));
1619 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
1620 if ( (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP
1621 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
1622 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1623 else
1624 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1625}
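/*
 * Example (illustrative note): for a relative near jump in 64-bit code with a
 * 66h operand-size prefix, AMD CPUs honour the prefix (16-bit operand size)
 * while Intel CPUs ignore it and keep the 64-bit default; that is the vendor
 * distinction the check above implements.
 */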
1626
1627#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */
1628
1629
1630
1631/** @name Register Access.
1632 * @{
1633 */
1634
1635/**
1636 * Gets a reference (pointer) to the specified hidden segment register.
1637 *
1638 * @returns Hidden register reference.
1639 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1640 * @param iSegReg The segment register.
1641 */
1642DECL_FORCE_INLINE(PCPUMSELREG) iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1643{
1644 Assert(iSegReg < X86_SREG_COUNT);
1645 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1646 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1647
1648 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
1649 return pSReg;
1650}
1651
1652
1653/**
1654 * Ensures that the given hidden segment register is up to date.
1655 *
1656 * @returns Hidden register reference.
1657 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1658 * @param pSReg The segment register.
1659 */
1660DECL_FORCE_INLINE(PCPUMSELREG) iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg) RT_NOEXCEPT
1661{
1662 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
1663 NOREF(pVCpu);
1664 return pSReg;
1665}
1666
1667
1668/**
1669 * Gets a reference (pointer) to the specified segment register (the selector
1670 * value).
1671 *
1672 * @returns Pointer to the selector variable.
1673 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1674 * @param iSegReg The segment register.
1675 */
1676DECL_FORCE_INLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1677{
1678 Assert(iSegReg < X86_SREG_COUNT);
1679 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1680 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
1681}
1682
1683
1684/**
1685 * Fetches the selector value of a segment register.
1686 *
1687 * @returns The selector value.
1688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1689 * @param iSegReg The segment register.
1690 */
1691DECL_FORCE_INLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1692{
1693 Assert(iSegReg < X86_SREG_COUNT);
1694 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1695 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
1696}
1697
1698
1699/**
1700 * Fetches the base address value of a segment register.
1701 *
1702 * @returns The base address value.
1703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1704 * @param iSegReg The segment register.
1705 */
1706DECL_FORCE_INLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1707{
1708 Assert(iSegReg < X86_SREG_COUNT);
1709 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1710 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
1711}
1712
1713
1714/**
1715 * Gets a reference (pointer) to the specified general purpose register.
1716 *
1717 * @returns Register reference.
1718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1719 * @param iReg The general purpose register.
1720 */
1721DECL_FORCE_INLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1722{
1723 Assert(iReg < 16);
1724 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
1725}
1726
1727
1728#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1729/**
1730 * Gets a reference (pointer) to the specified 8-bit general purpose register.
1731 *
1732 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
1733 *
1734 * @returns Register reference.
1735 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1736 * @param iReg The register.
1737 */
1738DECL_FORCE_INLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1739{
1740 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
1741 {
1742 Assert(iReg < 16);
1743 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
1744 }
1745 /* high 8-bit register. */
1746 Assert(iReg < 8);
1747 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
1748}
1749#endif
1750
1751
1752/**
1753 * Gets a reference (pointer) to the specified 8-bit general purpose register,
1754 * alternative version with extended (20) register index.
1755 *
1756 * @returns Register reference.
1757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1758 * @param iRegEx The register. The first 16 are regular ones,
1759 * whereas 16 thru 19 map to AH, CH, DH and BH.
1760 */
1761DECL_FORCE_INLINE(uint8_t *) iemGRegRefU8Ex(PVMCPUCC pVCpu, uint8_t iRegEx) RT_NOEXCEPT
1762{
1763 /** @todo This could be done by double indexing on little endian hosts:
1764 * return &pVCpu->cpum.GstCtx.aGRegs[iRegEx & 15].ab[iRegEx >> 4]; */
1765 if (iRegEx < 16)
1766 return &pVCpu->cpum.GstCtx.aGRegs[iRegEx].u8;
1767
1768 /* high 8-bit register. */
1769 Assert(iRegEx < 20);
1770 return &pVCpu->cpum.GstCtx.aGRegs[iRegEx & 3].bHi;
1771}
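/* Editorial note (not part of the original source): with the extended indexing
 * used above, values 16 thru 19 select the legacy high byte registers, e.g.:
 *
 *     iemGRegRefU8Ex(pVCpu, 0);    // -> &AL, i.e. &aGRegs[0].u8
 *     iemGRegRefU8Ex(pVCpu, 16);   // -> &AH, i.e. &aGRegs[0].bHi
 *     iemGRegRefU8Ex(pVCpu, 19);   // -> &BH, i.e. &aGRegs[3].bHi
 */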
1772
1773
1774/**
1775 * Gets a reference (pointer) to the specified 16-bit general purpose register.
1776 *
1777 * @returns Register reference.
1778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1779 * @param iReg The register.
1780 */
1781DECL_FORCE_INLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1782{
1783 Assert(iReg < 16);
1784 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
1785}
1786
1787
1788/**
1789 * Gets a reference (pointer) to the specified 32-bit general purpose register.
1790 *
1791 * @returns Register reference.
1792 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1793 * @param iReg The register.
1794 */
1795DECL_FORCE_INLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1796{
1797 Assert(iReg < 16);
1798 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1799}
1800
1801
1802/**
1803 * Gets a reference (pointer) to the specified signed 32-bit general purpose register.
1804 *
1805 * @returns Register reference.
1806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1807 * @param iReg The register.
1808 */
1809DECL_FORCE_INLINE(int32_t *) iemGRegRefI32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1810{
1811 Assert(iReg < 16);
1812 return (int32_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1813}
1814
1815
1816/**
1817 * Gets a reference (pointer) to the specified 64-bit general purpose register.
1818 *
1819 * @returns Register reference.
1820 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1821 * @param iReg The register.
1822 */
1823DECL_FORCE_INLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1824{
1825 Assert(iReg < 16);
1826 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1827}
1828
1829
1830/**
1831 * Gets a reference (pointer) to the specified signed 64-bit general purpose register.
1832 *
1833 * @returns Register reference.
1834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1835 * @param iReg The register.
1836 */
1837DECL_FORCE_INLINE(int64_t *) iemGRegRefI64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1838{
1839 Assert(iReg < 16);
1840 return (int64_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1841}
1842
1843
1844/**
1845 * Gets a reference (pointer) to the specified segment register's base address.
1846 *
1847 * @returns Segment register base address reference.
1848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1849 * @param iSegReg The segment selector.
1850 */
1851DECL_FORCE_INLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1852{
1853 Assert(iSegReg < X86_SREG_COUNT);
1854 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1855 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
1856}
1857
1858
1859#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1860/**
1861 * Fetches the value of an 8-bit general purpose register.
1862 *
1863 * @returns The register value.
1864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1865 * @param iReg The register.
1866 */
1867DECL_FORCE_INLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1868{
1869 return *iemGRegRefU8(pVCpu, iReg);
1870}
1871#endif
1872
1873
1874/**
1875 * Fetches the value of an 8-bit general purpose register, alternative version
1876 * with extended (20) register index.
1877 *
1878 * @returns The register value.
1879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1880 * @param iRegEx The register. The first 16 are regular ones,
1881 * whereas 16 thru 19 map to AH, CH, DH and BH.
1882 */
1883DECL_FORCE_INLINE(uint8_t) iemGRegFetchU8Ex(PVMCPUCC pVCpu, uint8_t iRegEx) RT_NOEXCEPT
1884{
1885 return *iemGRegRefU8Ex(pVCpu, iRegEx);
1886}
1887
1888
1889/**
1890 * Fetches the value of a 16-bit general purpose register.
1891 *
1892 * @returns The register value.
1893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1894 * @param iReg The register.
1895 */
1896DECL_FORCE_INLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1897{
1898 Assert(iReg < 16);
1899 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
1900}
1901
1902
1903/**
1904 * Fetches the value of a 32-bit general purpose register.
1905 *
1906 * @returns The register value.
1907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1908 * @param iReg The register.
1909 */
1910DECL_FORCE_INLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1911{
1912 Assert(iReg < 16);
1913 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1914}
1915
1916
1917/**
1918 * Fetches the value of a 64-bit general purpose register.
1919 *
1920 * @returns The register value.
1921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1922 * @param iReg The register.
1923 */
1924DECL_FORCE_INLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1925{
1926 Assert(iReg < 16);
1927 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1928}
1929
1930
1931/**
1932 * Stores a 16-bit value to a general purpose register.
1933 *
1934 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1935 * @param iReg The register.
1936 * @param uValue The value to store.
1937 */
1938DECL_FORCE_INLINE(void) iemGRegStoreU16(PVMCPUCC pVCpu, uint8_t iReg, uint16_t uValue) RT_NOEXCEPT
1939{
1940 Assert(iReg < 16);
1941 pVCpu->cpum.GstCtx.aGRegs[iReg].u16 = uValue;
1942}
1943
1944
1945/**
1946 * Stores a 32-bit value to a general purpose register, implicitly clearing the
1947 * high 32 bits.
1948 *
1949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1950 * @param iReg The register.
1951 * @param uValue The value to store.
1952 */
1953DECL_FORCE_INLINE(void) iemGRegStoreU32(PVMCPUCC pVCpu, uint8_t iReg, uint32_t uValue) RT_NOEXCEPT
1954{
1955 Assert(iReg < 16);
1956 pVCpu->cpum.GstCtx.aGRegs[iReg].u64 = uValue;
1957}
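/* Editorial note (not part of the original source): the store above goes via the
 * .u64 member on purpose so the upper half is zeroed, matching the architectural
 * behaviour of 32-bit destinations in 64-bit mode.  E.g. for register 0 (RAX):
 *
 *     iemGRegStoreU64(pVCpu, 0, UINT64_MAX);
 *     iemGRegStoreU32(pVCpu, 0, 0x12345678);
 *     // RAX == 0x0000000012345678, not 0xffffffff12345678.
 */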
1958
1959
1960/**
1961 * Stores a 64-bit value to a general purpose register.
1962 *
1963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1964 * @param iReg The register.
1965 * @param uValue The value to store.
1966 */
1967DECL_FORCE_INLINE(void) iemGRegStoreU64(PVMCPUCC pVCpu, uint8_t iReg, uint64_t uValue) RT_NOEXCEPT
1968{
1969 Assert(iReg < 16);
1970 pVCpu->cpum.GstCtx.aGRegs[iReg].u64 = uValue;
1971}
1972
1973
1974/**
1975 * Get the address of the top of the stack.
1976 *
1977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1978 */
1979DECL_FORCE_INLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu) RT_NOEXCEPT
1980{
1981 if (IEM_IS_64BIT_CODE(pVCpu))
1982 return pVCpu->cpum.GstCtx.rsp;
1983 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1984 return pVCpu->cpum.GstCtx.esp;
1985 return pVCpu->cpum.GstCtx.sp;
1986}
1987
1988
1989/**
1990 * Updates the RIP/EIP/IP to point to the next instruction.
1991 *
1992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1993 * @param cbInstr The number of bytes to add.
1994 */
1995DECL_FORCE_INLINE(void) iemRegAddToRip(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
1996{
1997 /*
1998 * Advance RIP.
1999 *
2000 * When we're targeting 8086/8, 80186/8 or 80286 mode the updates are 16-bit,
2001 * while in all other modes except LM64 the updates are 32-bit. This means
2002 * we need to watch for both 32-bit and 16-bit "carry" situations, i.e.
2003 * 4GB and 64KB rollovers, and decide whether anything needs masking.
2004 *
2005 * See PC wrap around tests in bs3-cpu-weird-1.
2006 */
2007 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
2008 uint64_t const uRipNext = uRipPrev + cbInstr;
2009 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & (RT_BIT_64(32) | RT_BIT_64(16)))
2010 || IEM_IS_64BIT_CODE(pVCpu)))
2011 pVCpu->cpum.GstCtx.rip = uRipNext;
2012 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
2013 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
2014 else
2015 pVCpu->cpum.GstCtx.rip = (uint16_t)uRipNext;
2016}
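/* Editorial note (not part of the original source): a worked example of the
 * rollover check above.  With RIP=0xffff and cbInstr=2 the sum is 0x10001, so
 * bit 16 differs between uRipPrev and uRipNext and the slow path is taken; a
 * pre-386 target then truncates the result to 0x0001, while a 386+ target keeps
 * the 32-bit value.  In 64-bit code the masking is skipped and RIP just advances. */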
2017
2018
2019/**
2020 * Called by iemRegAddToRipAndFinishingClearingRF and others when any of the
2021 * following EFLAGS bits are set:
2022 * - X86_EFL_RF - clear it.
2023 * - CPUMCTX_INHIBIT_SHADOW (_SS/_STI) - clear them.
2024 * - X86_EFL_TF - generate single step \#DB trap.
2025 * - CPUMCTX_DBG_HIT_DR0/1/2/3 - generate \#DB trap (data or I/O, not
2026 * instruction).
2027 *
2028 * According to @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events},
2029 * a \#DB due to TF (single stepping) or a DRx non-instruction breakpoint
2030 * takes priority over both NMIs and hardware interrupts. So, neither is
2031 * considered here. (The RESET, \#MC, SMI, INIT, STOPCLK and FLUSH events are
2032 * either unsupported or will be triggered on top of any \#DB raised here.)
2033 *
2034 * The RF flag only needs to be cleared here as it only suppresses instruction
2035 * breakpoints which are not raised here (happens synchronously during
2036 * instruction fetching).
2037 *
2038 * The CPUMCTX_INHIBIT_SHADOW_SS flag will be cleared by this function, so its
2039 * status has no bearing on whether \#DB exceptions are raised.
2040 *
2041 * @note This must *NOT* be called by the two instructions setting the
2042 * CPUMCTX_INHIBIT_SHADOW_SS flag.
2043 *
2044 * @see @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events}
2045 * @see @sdmv3{077,200,6.8.3,Masking Exceptions and Interrupts When Switching
2046 * Stacks}
2047 */
2048static VBOXSTRICTRC iemFinishInstructionWithFlagsSet(PVMCPUCC pVCpu) RT_NOEXCEPT
2049{
2050 /*
2051 * Normally we're just here to clear RF and/or interrupt shadow bits.
2052 */
2053 if (RT_LIKELY((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) == 0))
2054 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
2055 else
2056 {
2057 /*
2058 * Raise a #DB or/and DBGF event.
2059 */
2060 VBOXSTRICTRC rcStrict;
2061 if (pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK))
2062 {
2063 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2064 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
2065 if (pVCpu->cpum.GstCtx.eflags.uBoth & X86_EFL_TF)
2066 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS;
2067 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK) >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2068 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64\n",
2069 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
2070 pVCpu->cpum.GstCtx.rflags.uBoth));
2071
2072 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK);
2073 rcStrict = iemRaiseDebugException(pVCpu);
2074
2075 /* A DBGF event/breakpoint trumps the iemRaiseDebugException informational status code. */
2076 if ((pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK) && RT_FAILURE(rcStrict))
2077 {
2078 rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
2079 LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
2080 }
2081 }
2082 else
2083 {
2084 Assert(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK);
2085 rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
2086 LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
2087 }
2088 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_DBG_DBGF_MASK;
2089 return rcStrict;
2090 }
2091 return VINF_SUCCESS;
2092}
2093
2094
2095/**
2096 * Clears the RF and CPUMCTX_INHIBIT_SHADOW, triggering \#DB if pending.
2097 *
2098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2099 */
2100DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegFinishClearingRF(PVMCPUCC pVCpu) RT_NOEXCEPT
2101{
2102 /*
2103 * We assume that most of the time nothing actually needs doing here.
2104 */
2105 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
2106 if (RT_LIKELY(!( pVCpu->cpum.GstCtx.eflags.uBoth
2107 & (X86_EFL_TF | X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) ))
2108 return VINF_SUCCESS;
2109 return iemFinishInstructionWithFlagsSet(pVCpu);
2110}
2111
2112
2113/**
2114 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF
2115 * and CPUMCTX_INHIBIT_SHADOW.
2116 *
2117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2118 * @param cbInstr The number of bytes to add.
2119 */
2120DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2121{
2122 iemRegAddToRip(pVCpu, cbInstr);
2123 return iemRegFinishClearingRF(pVCpu);
2124}
2125
2126
2127/**
2128 * Updates the RIP to point to the next instruction and clears EFLAGS.RF
2129 * and CPUMCTX_INHIBIT_SHADOW.
2130 *
2131 * Only called from 64-bit code.
2132 *
2133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2134 * @param cbInstr The number of bytes to add.
2135 */
2136DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRip64AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2137{
2138 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rip + cbInstr;
2139 return iemRegFinishClearingRF(pVCpu);
2140}
2141
2142
2143/**
2144 * Updates the EIP to point to the next instruction and clears EFLAGS.RF and
2145 * CPUMCTX_INHIBIT_SHADOW.
2146 *
2147 * This is never called from 64-bit code.
2148 *
2149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2150 * @param cbInstr The number of bytes to add.
2151 */
2152DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToEip32AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2153{
2154 pVCpu->cpum.GstCtx.rip = (uint32_t)(pVCpu->cpum.GstCtx.eip + cbInstr);
2155 return iemRegFinishClearingRF(pVCpu);
2156}
2157
2158
2159/**
2160 * Updates the IP to point to the next instruction and clears EFLAGS.RF and
2161 * CPUMCTX_INHIBIT_SHADOW.
2162 *
2163 * This is only ever used from 16-bit code on a pre-386 CPU.
2164 *
2165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2166 * @param cbInstr The number of bytes to add.
2167 */
2168DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToIp16AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2169{
2170 pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr);
2171 return iemRegFinishClearingRF(pVCpu);
2172}
2173
2174
2175/**
2176 * Adds an 8-bit signed jump offset to RIP from 64-bit code.
2177 *
2178 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2179 * segment limit.
2180 *
2181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2182 * @param cbInstr Instruction size.
2183 * @param offNextInstr The offset of the next instruction.
2184 * @param enmEffOpSize Effective operand size.
2185 */
2186DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2187 IEMMODE enmEffOpSize) RT_NOEXCEPT
2188{
2189 Assert(IEM_IS_64BIT_CODE(pVCpu));
2190 Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);
2191
2192 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2193 if (enmEffOpSize == IEMMODE_16BIT)
2194 uNewRip &= UINT16_MAX;
2195
2196 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2197 pVCpu->cpum.GstCtx.rip = uNewRip;
2198 else
2199 return iemRaiseGeneralProtectionFault0(pVCpu);
2200
2201#ifndef IEM_WITH_CODE_TLB
2202 iemOpcodeFlushLight(pVCpu, cbInstr);
2203#endif
2204
2205 /*
2206 * Clear RF and finish the instruction (maybe raise #DB).
2207 */
2208 return iemRegFinishClearingRF(pVCpu);
2209}
2210
2211
2212/**
2213 * Adds an 8-bit signed jump offset to EIP, on 386 or later, from 16-bit or
2214 * 32-bit code (never 64-bit).
2215 *
2216 * May raise a \#GP(0) if the new EIP is outside the code
2217 * segment limit.
2218 *
2219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2220 * @param cbInstr Instruction size.
2221 * @param offNextInstr The offset of the next instruction.
2222 * @param enmEffOpSize Effective operand size.
2223 */
2224DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2225 IEMMODE enmEffOpSize) RT_NOEXCEPT
2226{
2227 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2228 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2229
2230 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
2231 if (enmEffOpSize == IEMMODE_16BIT)
2232 uNewEip &= UINT16_MAX;
2233 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2234 pVCpu->cpum.GstCtx.rip = uNewEip;
2235 else
2236 return iemRaiseGeneralProtectionFault0(pVCpu);
2237
2238#ifndef IEM_WITH_CODE_TLB
2239 iemOpcodeFlushLight(pVCpu, cbInstr);
2240#endif
2241
2242 /*
2243 * Clear RF and finish the instruction (maybe raise #DB).
2244 */
2245 return iemRegFinishClearingRF(pVCpu);
2246}
2247
2248
2249/**
2250 * Adds an 8-bit signed jump offset to IP, on a pre-386 CPU.
2251 *
2252 * May raise a \#GP(0) if the new IP is outside the code
2253 * segment limit.
2254 *
2255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2256 * @param cbInstr Instruction size.
2257 * @param offNextInstr The offset of the next instruction.
2258 */
2259DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegIp16RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2260 int8_t offNextInstr) RT_NOEXCEPT
2261{
2262 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2263
2264 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
2265 if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
2266 pVCpu->cpum.GstCtx.rip = uNewIp;
2267 else
2268 return iemRaiseGeneralProtectionFault0(pVCpu);
2269
2270#ifndef IEM_WITH_CODE_TLB
2271 iemOpcodeFlushLight(pVCpu, cbInstr);
2272#endif
2273
2274 /*
2275 * Clear RF and finish the instruction (maybe raise #DB).
2276 */
2277 return iemRegFinishClearingRF(pVCpu);
2278}
2279
2280
2281/**
2282 * Adds a 16-bit signed jump offset to RIP from 64-bit code.
2283 *
2284 * @returns Strict VBox status code.
2285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2286 * @param cbInstr Instruction size.
2287 * @param offNextInstr The offset of the next instruction.
2288 */
2289DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2290 int16_t offNextInstr) RT_NOEXCEPT
2291{
2292 Assert(IEM_IS_64BIT_CODE(pVCpu));
2293
2294 pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr);
2295
2296#ifndef IEM_WITH_CODE_TLB
2297 iemOpcodeFlushLight(pVCpu, cbInstr);
2298#endif
2299
2300 /*
2301 * Clear RF and finish the instruction (maybe raise #DB).
2302 */
2303 return iemRegFinishClearingRF(pVCpu);
2304}
2305
2306
2307/**
2308 * Adds a 16-bit signed jump offset to EIP from 16-bit or 32-bit code.
2309 *
2310 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2311 * segment limit.
2312 *
2313 * @returns Strict VBox status code.
2314 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2315 * @param cbInstr Instruction size.
2316 * @param offNextInstr The offset of the next instruction.
2317 *
2318 * @note This is also used by 16-bit code in pre-386 mode, as the code is
2319 * identical.
2320 */
2321DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2322 int16_t offNextInstr) RT_NOEXCEPT
2323{
2324 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2325
2326 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
2327 if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
2328 pVCpu->cpum.GstCtx.rip = uNewIp;
2329 else
2330 return iemRaiseGeneralProtectionFault0(pVCpu);
2331
2332#ifndef IEM_WITH_CODE_TLB
2333 iemOpcodeFlushLight(pVCpu, cbInstr);
2334#endif
2335
2336 /*
2337 * Clear RF and finish the instruction (maybe raise #DB).
2338 */
2339 return iemRegFinishClearingRF(pVCpu);
2340}
2341
2342
2343/**
2344 * Adds a 32-bit signed jump offset to RIP from 64-bit code.
2345 *
2346 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2347 * segment limit.
2348 *
2349 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
2350 * only alternative for relative jumps in 64-bit code and that is already
2351 * handled in the decoder stage.
2352 *
2353 * @returns Strict VBox status code.
2354 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2355 * @param cbInstr Instruction size.
2356 * @param offNextInstr The offset of the next instruction.
2357 */
2358DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2359 int32_t offNextInstr) RT_NOEXCEPT
2360{
2361 Assert(IEM_IS_64BIT_CODE(pVCpu));
2362
2363 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2364 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2365 pVCpu->cpum.GstCtx.rip = uNewRip;
2366 else
2367 return iemRaiseGeneralProtectionFault0(pVCpu);
2368
2369#ifndef IEM_WITH_CODE_TLB
2370 iemOpcodeFlushLight(pVCpu, cbInstr);
2371#endif
2372
2373 /*
2374 * Clear RF and finish the instruction (maybe raise #DB).
2375 */
2376 return iemRegFinishClearingRF(pVCpu);
2377}
2378
2379
2380/**
2381 * Adds a 32-bit signed jump offset to EIP from 16-bit or 32-bit code.
2382 *
2383 * May raise a \#GP(0) if the new EIP is outside the code
2384 * segment limit.
2385 *
2386 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
2387 * only alternative for relative jumps in 32-bit code and that is already
2388 * handled in the decoder stage.
2389 *
2390 * @returns Strict VBox status code.
2391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2392 * @param cbInstr Instruction size.
2393 * @param offNextInstr The offset of the next instruction.
2394 */
2395DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2396 int32_t offNextInstr) RT_NOEXCEPT
2397{
2398 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2399 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
2400
2401 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
2402 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2403 pVCpu->cpum.GstCtx.rip = uNewEip;
2404 else
2405 return iemRaiseGeneralProtectionFault0(pVCpu);
2406
2407#ifndef IEM_WITH_CODE_TLB
2408 iemOpcodeFlushLight(pVCpu, cbInstr);
2409#endif
2410
2411 /*
2412 * Clear RF and finish the instruction (maybe raise #DB).
2413 */
2414 return iemRegFinishClearingRF(pVCpu);
2415}
2416
2417
2418/**
2419 * Extended version of iemFinishInstructionWithFlagsSet that goes with
2420 * iemRegAddToRipAndFinishingClearingRfEx.
2421 *
2422 * See iemFinishInstructionWithFlagsSet() for details.
2423 */
2424static VBOXSTRICTRC iemFinishInstructionWithTfSet(PVMCPUCC pVCpu) RT_NOEXCEPT
2425{
2426 /*
2427 * Raise a #DB.
2428 */
2429 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2430 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
2431 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS
2432 | (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK) >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2433 /** @todo Do we set all pending \#DB events, or just one? */
2434 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64 (popf)\n",
2435 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
2436 pVCpu->cpum.GstCtx.rflags.uBoth));
2437 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
2438 return iemRaiseDebugException(pVCpu);
2439}
2440
2441
2442/**
2443 * Extended version of iemRegAddToRipAndFinishingClearingRF for use by POPF and
2444 * others potentially updating EFLAGS.TF.
2445 *
2446 * The single step event must be generated using the TF value at the start of
2447 * the instruction, not the new value set by it.
2448 *
2449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2450 * @param cbInstr The number of bytes to add.
2451 * @param fEflOld The EFLAGS at the start of the instruction
2452 * execution.
2453 */
2454DECLINLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRfEx(PVMCPUCC pVCpu, uint8_t cbInstr, uint32_t fEflOld) RT_NOEXCEPT
2455{
2456 iemRegAddToRip(pVCpu, cbInstr);
2457 if (!(fEflOld & X86_EFL_TF))
2458 return iemRegFinishClearingRF(pVCpu);
2459 return iemFinishInstructionWithTfSet(pVCpu);
2460}
2461
2462
2463#ifndef IEM_WITH_OPAQUE_DECODER_STATE
2464/**
2465 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
2466 *
2467 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2468 */
2469DECLINLINE(VBOXSTRICTRC) iemRegUpdateRipAndFinishClearingRF(PVMCPUCC pVCpu) RT_NOEXCEPT
2470{
2471 return iemRegAddToRipAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
2472}
2473#endif
2474
2475
2476/**
2477 * Adds to the stack pointer.
2478 *
2479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2480 * @param cbToAdd The number of bytes to add (8-bit!).
2481 */
2482DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd) RT_NOEXCEPT
2483{
2484 if (IEM_IS_64BIT_CODE(pVCpu))
2485 pVCpu->cpum.GstCtx.rsp += cbToAdd;
2486 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2487 pVCpu->cpum.GstCtx.esp += cbToAdd;
2488 else
2489 pVCpu->cpum.GstCtx.sp += cbToAdd;
2490}
2491
2492
2493/**
2494 * Subtracts from the stack pointer.
2495 *
2496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2497 * @param cbToSub The number of bytes to subtract (8-bit!).
2498 */
2499DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub) RT_NOEXCEPT
2500{
2501 if (IEM_IS_64BIT_CODE(pVCpu))
2502 pVCpu->cpum.GstCtx.rsp -= cbToSub;
2503 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2504 pVCpu->cpum.GstCtx.esp -= cbToSub;
2505 else
2506 pVCpu->cpum.GstCtx.sp -= cbToSub;
2507}
2508
2509
2510/**
2511 * Adds to the temporary stack pointer.
2512 *
2513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2514 * @param pTmpRsp The temporary SP/ESP/RSP to update.
2515 * @param cbToAdd The number of bytes to add (16-bit).
2516 */
2517DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd) RT_NOEXCEPT
2518{
2519 if (IEM_IS_64BIT_CODE(pVCpu))
2520 pTmpRsp->u += cbToAdd;
2521 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2522 pTmpRsp->DWords.dw0 += cbToAdd;
2523 else
2524 pTmpRsp->Words.w0 += cbToAdd;
2525}
2526
2527
2528/**
2529 * Subtracts from the temporary stack pointer.
2530 *
2531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2532 * @param pTmpRsp The temporary SP/ESP/RSP to update.
2533 * @param cbToSub The number of bytes to subtract.
2534 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
2535 * expecting that.
2536 */
2537DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub) RT_NOEXCEPT
2538{
2539 if (IEM_IS_64BIT_CODE(pVCpu))
2540 pTmpRsp->u -= cbToSub;
2541 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2542 pTmpRsp->DWords.dw0 -= cbToSub;
2543 else
2544 pTmpRsp->Words.w0 -= cbToSub;
2545}
2546
2547
2548/**
2549 * Calculates the effective stack address for a push of the specified size as
2550 * well as the new RSP value (upper bits may be masked).
2551 *
2552 * @returns Effective stack address for the push.
2553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2554 * @param cbItem The size of the stack item to push.
2555 * @param puNewRsp Where to return the new RSP value.
2556 */
2557DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
2558{
2559 RTUINT64U uTmpRsp;
2560 RTGCPTR GCPtrTop;
2561 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
2562
2563 if (IEM_IS_64BIT_CODE(pVCpu))
2564 GCPtrTop = uTmpRsp.u -= cbItem;
2565 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2566 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
2567 else
2568 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
2569 *puNewRsp = uTmpRsp.u;
2570 return GCPtrTop;
2571}
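/* Editorial note (not part of the original source): e.g. pushing a 2-byte item
 * with SP=0x0001 and a 16-bit SS only wraps the low word:
 *
 *     uint64_t uNewRsp;
 *     RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
 *     // GCPtrTop == 0xffff; the upper 48 bits of uNewRsp stay untouched.
 */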
2572
2573
2574/**
2575 * Gets the current stack pointer and calculates the value after a pop of the
2576 * specified size.
2577 *
2578 * @returns Current stack pointer.
2579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2580 * @param cbItem The size of the stack item to pop.
2581 * @param puNewRsp Where to return the new RSP value.
2582 */
2583DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
2584{
2585 RTUINT64U uTmpRsp;
2586 RTGCPTR GCPtrTop;
2587 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
2588
2589 if (IEM_IS_64BIT_CODE(pVCpu))
2590 {
2591 GCPtrTop = uTmpRsp.u;
2592 uTmpRsp.u += cbItem;
2593 }
2594 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2595 {
2596 GCPtrTop = uTmpRsp.DWords.dw0;
2597 uTmpRsp.DWords.dw0 += cbItem;
2598 }
2599 else
2600 {
2601 GCPtrTop = uTmpRsp.Words.w0;
2602 uTmpRsp.Words.w0 += cbItem;
2603 }
2604 *puNewRsp = uTmpRsp.u;
2605 return GCPtrTop;
2606}
2607
2608
2609/**
2610 * Calculates the effective stack address for a push of the specified size as
2611 * well as the new temporary RSP value (upper bits may be masked).
2612 *
2613 * @returns Effective stack address for the push.
2614 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2615 * @param pTmpRsp The temporary stack pointer. This is updated.
2616 * @param cbItem The size of the stack item to push.
2617 */
2618DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
2619{
2620 RTGCPTR GCPtrTop;
2621
2622 if (IEM_IS_64BIT_CODE(pVCpu))
2623 GCPtrTop = pTmpRsp->u -= cbItem;
2624 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2625 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
2626 else
2627 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
2628 return GCPtrTop;
2629}
2630
2631
2632/**
2633 * Gets the effective stack address for a pop of the specified size and
2634 * calculates and updates the temporary RSP.
2635 *
2636 * @returns Current stack pointer.
2637 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2638 * @param pTmpRsp The temporary stack pointer. This is updated.
2639 * @param cbItem The size of the stack item to pop.
2640 */
2641DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
2642{
2643 RTGCPTR GCPtrTop;
2644 if (IEM_IS_64BIT_CODE(pVCpu))
2645 {
2646 GCPtrTop = pTmpRsp->u;
2647 pTmpRsp->u += cbItem;
2648 }
2649 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2650 {
2651 GCPtrTop = pTmpRsp->DWords.dw0;
2652 pTmpRsp->DWords.dw0 += cbItem;
2653 }
2654 else
2655 {
2656 GCPtrTop = pTmpRsp->Words.w0;
2657 pTmpRsp->Words.w0 += cbItem;
2658 }
2659 return GCPtrTop;
2660}
2661
2662/** @} */
2663
2664
2665/** @name FPU access and helpers.
2666 *
2667 * @{
2668 */
2669
2670
2671/**
2672 * Hook for preparing to use the host FPU.
2673 *
2674 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2675 *
2676 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2677 */
2678DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu) RT_NOEXCEPT
2679{
2680#ifdef IN_RING3
2681 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2682#else
2683 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
2684#endif
2685 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2686}
2687
2688
2689/**
2690 * Hook for preparing to use the host FPU for SSE.
2691 *
2692 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2693 *
2694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2695 */
2696DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu) RT_NOEXCEPT
2697{
2698 iemFpuPrepareUsage(pVCpu);
2699}
2700
2701
2702/**
2703 * Hook for preparing to use the host FPU for AVX.
2704 *
2705 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2706 *
2707 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2708 */
2709DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu) RT_NOEXCEPT
2710{
2711 iemFpuPrepareUsage(pVCpu);
2712}
2713
2714
2715/**
2716 * Hook for actualizing the guest FPU state before the interpreter reads it.
2717 *
2718 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2719 *
2720 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2721 */
2722DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2723{
2724#ifdef IN_RING3
2725 NOREF(pVCpu);
2726#else
2727 CPUMRZFpuStateActualizeForRead(pVCpu);
2728#endif
2729 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2730}
2731
2732
2733/**
2734 * Hook for actualizing the guest FPU state before the interpreter changes it.
2735 *
2736 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2737 *
2738 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2739 */
2740DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2741{
2742#ifdef IN_RING3
2743 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2744#else
2745 CPUMRZFpuStateActualizeForChange(pVCpu);
2746#endif
2747 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2748}
2749
2750
2751/**
2752 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
2753 * only.
2754 *
2755 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2756 *
2757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2758 */
2759DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2760{
2761#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
2762 NOREF(pVCpu);
2763#else
2764 CPUMRZFpuStateActualizeSseForRead(pVCpu);
2765#endif
2766 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2767}
2768
2769
2770/**
2771 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
2772 * read+write.
2773 *
2774 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2775 *
2776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2777 */
2778DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2779{
2780#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
2781 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2782#else
2783 CPUMRZFpuStateActualizeForChange(pVCpu);
2784#endif
2785 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2786
2787 /* Make sure any changes are loaded the next time around. */
2788 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_SSE;
2789}
2790
2791
2792/**
2793 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
2794 * only.
2795 *
2796 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2797 *
2798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2799 */
2800DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2801{
2802#ifdef IN_RING3
2803 NOREF(pVCpu);
2804#else
2805 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
2806#endif
2807 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2808}
2809
2810
2811/**
2812 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
2813 * read+write.
2814 *
2815 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2816 *
2817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2818 */
2819DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2820{
2821#ifdef IN_RING3
2822 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2823#else
2824 CPUMRZFpuStateActualizeForChange(pVCpu);
2825#endif
2826 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2827
2828 /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
2829 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
2830}
2831
2832
2833/**
2834 * Stores a QNaN value into a FPU register.
2835 *
2836 * @param pReg Pointer to the register.
2837 */
2838DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg) RT_NOEXCEPT
2839{
2840 pReg->au32[0] = UINT32_C(0x00000000);
2841 pReg->au32[1] = UINT32_C(0xc0000000);
2842 pReg->au16[4] = UINT16_C(0xffff);
2843}
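/* Editorial note (not part of the original source): the three stores above build
 * the 80-bit "floating-point indefinite" QNaN, i.e. sign=1, exponent=0x7fff and
 * mantissa 0xc000000000000000 (0xFFFF_C0000000_00000000 taken as a whole). */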
2844
2845
2846/**
2847 * Updates the FOP, FPU.CS and FPUIP registers, extended version.
2848 *
2849 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2850 * @param pFpuCtx The FPU context.
2851 * @param uFpuOpcode The FPU opcode value (see IEMCPU::uFpuOpcode).
2852 */
2853DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorkerEx(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint16_t uFpuOpcode) RT_NOEXCEPT
2854{
2855 Assert(uFpuOpcode != UINT16_MAX);
2856 pFpuCtx->FOP = uFpuOpcode;
2857 /** @todo x87.CS and FPUIP need to be kept separately. */
2858 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2859 {
2860 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
2861 * happens in real mode here based on the fnsave and fnstenv images. */
2862 pFpuCtx->CS = 0;
2863 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
2864 }
2865 else if (!IEM_IS_LONG_MODE(pVCpu))
2866 {
2867 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
2868 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
2869 }
2870 else
2871 *(uint64_t *)&pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
2872}
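/* Editorial note (not part of the original source): in real and V86 mode the
 * code above produces a linearised FPUIP with FPU CS forced to zero, e.g.
 * CS=0x1234 and EIP=0x0010 gives FPUIP = 0x0010 | (0x1234 << 4) = 0x12350. */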
2873
2874
2875/**
2876 * Marks the specified stack register as free (for FFREE).
2877 *
2878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2879 * @param iStReg The register to free.
2880 */
2881DECLINLINE(void) iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
2882{
2883 Assert(iStReg < 8);
2884 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2885 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
2886 pFpuCtx->FTW &= ~RT_BIT(iReg);
2887}
2888
2889
2890/**
2891 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
2892 *
2893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2894 */
2895DECLINLINE(void) iemFpuStackIncTop(PVMCPUCC pVCpu) RT_NOEXCEPT
2896{
2897 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2898 uint16_t uFsw = pFpuCtx->FSW;
2899 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
2900 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
2901 uFsw &= ~X86_FSW_TOP_MASK;
2902 uFsw |= uTop;
2903 pFpuCtx->FSW = uFsw;
2904}
2905
2906
2907/**
2908 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
2909 *
2910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2911 */
2912DECLINLINE(void) iemFpuStackDecTop(PVMCPUCC pVCpu) RT_NOEXCEPT
2913{
2914 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2915 uint16_t uFsw = pFpuCtx->FSW;
2916 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
2917 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
2918 uFsw &= ~X86_FSW_TOP_MASK;
2919 uFsw |= uTop;
2920 pFpuCtx->FSW = uFsw;
2921}
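/* Editorial note (not part of the original source): TOP is a 3-bit field, so the
 * decrement above is performed by adding 7 modulo 8; e.g. TOP=0 becomes
 * (0 + 7) & 7 = 7, which is exactly what a hardware FPU push does to TOP. */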
2922
2923
2924
2925
2926DECLINLINE(int) iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
2927{
2928 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2929 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
2930 if (pFpuCtx->FTW & RT_BIT(iReg))
2931 return VINF_SUCCESS;
2932 return VERR_NOT_FOUND;
2933}
2934
2935
2936DECLINLINE(int) iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef) RT_NOEXCEPT
2937{
2938 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2939 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
2940 if (pFpuCtx->FTW & RT_BIT(iReg))
2941 {
2942 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
2943 return VINF_SUCCESS;
2944 }
2945 return VERR_NOT_FOUND;
2946}
2947
2948
2949DECLINLINE(int) iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
2950 uint8_t iStReg1, PCRTFLOAT80U *ppRef1) RT_NOEXCEPT
2951{
2952 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2953 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2954 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
2955 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
2956 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
2957 {
2958 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
2959 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
2960 return VINF_SUCCESS;
2961 }
2962 return VERR_NOT_FOUND;
2963}
2964
2965
2966DECLINLINE(int) iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1) RT_NOEXCEPT
2967{
2968 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2969 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2970 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
2971 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
2972 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
2973 {
2974 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
2975 return VINF_SUCCESS;
2976 }
2977 return VERR_NOT_FOUND;
2978}
2979
2980
2981/**
2982 * Rotates the stack registers when setting new TOS.
2983 *
2984 * @param pFpuCtx The FPU context.
2985 * @param iNewTop New TOS value.
2986 * @remarks We only do this to speed up fxsave/fxrstor which
2987 * arrange the FP registers in stack order.
2988 * MUST be done before writing the new TOS (FSW).
2989 */
2990DECLINLINE(void) iemFpuRotateStackSetTop(PX86FXSTATE pFpuCtx, uint16_t iNewTop) RT_NOEXCEPT
2991{
2992 uint16_t iOldTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2993 RTFLOAT80U ar80Temp[8];
2994
2995 if (iOldTop == iNewTop)
2996 return;
2997
2998 /* Unscrew the stack and get it into 'native' order. */
2999 ar80Temp[0] = pFpuCtx->aRegs[(8 - iOldTop + 0) & X86_FSW_TOP_SMASK].r80;
3000 ar80Temp[1] = pFpuCtx->aRegs[(8 - iOldTop + 1) & X86_FSW_TOP_SMASK].r80;
3001 ar80Temp[2] = pFpuCtx->aRegs[(8 - iOldTop + 2) & X86_FSW_TOP_SMASK].r80;
3002 ar80Temp[3] = pFpuCtx->aRegs[(8 - iOldTop + 3) & X86_FSW_TOP_SMASK].r80;
3003 ar80Temp[4] = pFpuCtx->aRegs[(8 - iOldTop + 4) & X86_FSW_TOP_SMASK].r80;
3004 ar80Temp[5] = pFpuCtx->aRegs[(8 - iOldTop + 5) & X86_FSW_TOP_SMASK].r80;
3005 ar80Temp[6] = pFpuCtx->aRegs[(8 - iOldTop + 6) & X86_FSW_TOP_SMASK].r80;
3006 ar80Temp[7] = pFpuCtx->aRegs[(8 - iOldTop + 7) & X86_FSW_TOP_SMASK].r80;
3007
3008 /* Now rotate the stack to the new position. */
3009 pFpuCtx->aRegs[0].r80 = ar80Temp[(iNewTop + 0) & X86_FSW_TOP_SMASK];
3010 pFpuCtx->aRegs[1].r80 = ar80Temp[(iNewTop + 1) & X86_FSW_TOP_SMASK];
3011 pFpuCtx->aRegs[2].r80 = ar80Temp[(iNewTop + 2) & X86_FSW_TOP_SMASK];
3012 pFpuCtx->aRegs[3].r80 = ar80Temp[(iNewTop + 3) & X86_FSW_TOP_SMASK];
3013 pFpuCtx->aRegs[4].r80 = ar80Temp[(iNewTop + 4) & X86_FSW_TOP_SMASK];
3014 pFpuCtx->aRegs[5].r80 = ar80Temp[(iNewTop + 5) & X86_FSW_TOP_SMASK];
3015 pFpuCtx->aRegs[6].r80 = ar80Temp[(iNewTop + 6) & X86_FSW_TOP_SMASK];
3016 pFpuCtx->aRegs[7].r80 = ar80Temp[(iNewTop + 7) & X86_FSW_TOP_SMASK];
3017}
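/* Editorial note (not part of the original source): aRegs[i] holds ST(i), i.e.
 * physical register (TOP + i) & 7.  The first block above copies the registers
 * out in physical order and the second reloads them for the new TOP, so with
 * iOldTop=2 and iNewTop=6 the value that was in aRegs[4] (physical register 6)
 * ends up in aRegs[0]. */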
3018
3019
3020/**
3021 * Updates the FPU exception status after FCW is changed.
3022 *
3023 * @param pFpuCtx The FPU context.
3024 */
3025DECLINLINE(void) iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
3026{
3027 uint16_t u16Fsw = pFpuCtx->FSW;
3028 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
3029 u16Fsw |= X86_FSW_ES | X86_FSW_B;
3030 else
3031 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
3032 pFpuCtx->FSW = u16Fsw;
3033}
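/* Editorial note (not part of the original source): e.g. if FSW.IE is pending
 * and the new FCW clears the IM mask bit, the test above finds an unmasked
 * exception and sets FSW.ES and FSW.B; with all exceptions masked again, both
 * summary bits are cleared. */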
3034
3035
3036/**
3037 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
3038 *
3039 * @returns The full FTW.
3040 * @param pFpuCtx The FPU context.
3041 */
3042DECLINLINE(uint16_t) iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx) RT_NOEXCEPT
3043{
3044 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
3045 uint16_t u16Ftw = 0;
3046 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
3047 for (unsigned iSt = 0; iSt < 8; iSt++)
3048 {
3049 unsigned const iReg = (iSt + iTop) & 7;
3050 if (!(u8Ftw & RT_BIT(iReg)))
3051 u16Ftw |= 3 << (iReg * 2); /* empty */
3052 else
3053 {
3054 uint16_t uTag;
3055 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
3056 if (pr80Reg->s.uExponent == 0x7fff)
3057 uTag = 2; /* Exponent is all 1's => Special. */
3058 else if (pr80Reg->s.uExponent == 0x0000)
3059 {
3060 if (pr80Reg->s.uMantissa == 0x0000)
3061 uTag = 1; /* All bits are zero => Zero. */
3062 else
3063 uTag = 2; /* Must be special. */
3064 }
3065 else if (pr80Reg->s.uMantissa & RT_BIT_64(63)) /* The J bit. */
3066 uTag = 0; /* Valid. */
3067 else
3068 uTag = 2; /* Must be special. */
3069
3070 u16Ftw |= uTag << (iReg * 2);
3071 }
3072 }
3073
3074 return u16Ftw;
3075}
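/* Editorial note (not part of the original source): the two-bit tags produced
 * above follow the FSTENV/FSAVE encoding (00=valid, 01=zero, 10=special,
 * 11=empty).  An empty stack thus yields 0xffff, and a single 1.0 pushed with
 * TOP=7 puts tag 00 into bits 15:14. */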
3076
3077
3078/**
3079 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
3080 *
3081 * @returns The compressed FTW.
3082 * @param u16FullFtw The full FTW to convert.
3083 */
3084DECLINLINE(uint16_t) iemFpuCompressFtw(uint16_t u16FullFtw) RT_NOEXCEPT
3085{
3086 uint8_t u8Ftw = 0;
3087 for (unsigned i = 0; i < 8; i++)
3088 {
3089 if ((u16FullFtw & 3) != 3 /*empty*/)
3090 u8Ftw |= RT_BIT(i);
3091 u16FullFtw >>= 2;
3092 }
3093
3094 return u8Ftw;
3095}
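/* Editorial note (not part of the original source): e.g. a full tag word of
 * 0xfffe (register 0 valid, all others empty) compresses to 0x01. */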
3096
3097/** @} */
3098
3099
3100/** @name Memory access.
3101 *
3102 * @{
3103 */
3104
3105
3106/**
3107 * Checks whether alignment checks are enabled or not.
3108 *
3109 * @returns true if enabled, false if not.
3110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3111 */
3112DECLINLINE(bool) iemMemAreAlignmentChecksEnabled(PVMCPUCC pVCpu) RT_NOEXCEPT
3113{
3114 AssertCompile(X86_CR0_AM == X86_EFL_AC);
3115 return IEM_GET_CPL(pVCpu) == 3
3116 && (((uint32_t)pVCpu->cpum.GstCtx.cr0 & pVCpu->cpum.GstCtx.eflags.u) & X86_CR0_AM);
3117}
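/* Editorial note (not part of the original source): X86_CR0_AM and X86_EFL_AC
 * are both bit 18 (hence the AssertCompile above), so ANDing CR0 with EFLAGS
 * leaves the bit set only when both are armed; the function therefore returns
 * true only at CPL 3 with CR0.AM=1 and EFLAGS.AC=1. */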
3118
3119/**
3120 * Checks if the given segment can be written to, raise the appropriate
3121 * exception if not.
3122 *
3123 * @returns VBox strict status code.
3124 *
3125 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3126 * @param pHid Pointer to the hidden register.
3127 * @param iSegReg The register number.
3128 * @param pu64BaseAddr Where to return the base address to use for the
3129 * segment. (In 64-bit code it may differ from the
3130 * base in the hidden segment.)
3131 */
3132DECLINLINE(VBOXSTRICTRC) iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
3133 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
3134{
3135 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3136
3137 if (IEM_IS_64BIT_CODE(pVCpu))
3138 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
3139 else
3140 {
3141 if (!pHid->Attr.n.u1Present)
3142 {
3143 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
3144 AssertRelease(uSel == 0);
3145 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
3146 return iemRaiseGeneralProtectionFault0(pVCpu);
3147 }
3148
3149 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
3150 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
3151 && !IEM_IS_64BIT_CODE(pVCpu) )
3152 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
3153 *pu64BaseAddr = pHid->u64Base;
3154 }
3155 return VINF_SUCCESS;
3156}
3157
3158
3159/**
3160 * Checks if the given segment can be read from, raise the appropriate
3161 * exception if not.
3162 *
3163 * @returns VBox strict status code.
3164 *
3165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3166 * @param pHid Pointer to the hidden register.
3167 * @param iSegReg The register number.
3168 * @param pu64BaseAddr Where to return the base address to use for the
3169 * segment. (In 64-bit code it may differ from the
3170 * base in the hidden segment.)
3171 */
3172DECLINLINE(VBOXSTRICTRC) iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
3173 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
3174{
3175 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3176
3177 if (IEM_IS_64BIT_CODE(pVCpu))
3178 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
3179 else
3180 {
3181 if (!pHid->Attr.n.u1Present)
3182 {
3183 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
3184 AssertRelease(uSel == 0);
3185 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
3186 return iemRaiseGeneralProtectionFault0(pVCpu);
3187 }
3188
3189 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3190 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
3191 *pu64BaseAddr = pHid->u64Base;
3192 }
3193 return VINF_SUCCESS;
3194}
3195
3196
3197/**
3198 * Maps a physical page.
3199 *
3200 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
3201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3202 * @param GCPhysMem The physical address.
3203 * @param fAccess The intended access.
3204 * @param ppvMem Where to return the mapping address.
3205 * @param pLock The PGM lock.
3206 */
3207DECLINLINE(int) iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
3208 void **ppvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
3209{
3210#ifdef IEM_LOG_MEMORY_WRITES
3211 if (fAccess & IEM_ACCESS_TYPE_WRITE)
3212 return VERR_PGM_PHYS_TLB_CATCH_ALL;
3213#endif
3214
3215 /** @todo This API may require some improving later. A private deal with PGM
3216 * regarding locking and unlocking needs to be struck. A couple of TLBs
3217 * living in PGM, but with publicly accessible inlined access methods
3218 * could perhaps be an even better solution. */
3219 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
3220 GCPhysMem,
3221 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
3222 RT_BOOL(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS),
3223 ppvMem,
3224 pLock);
3225 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
3226 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
3227
3228 return rc;
3229}
3230
3231
3232/**
3233 * Unmap a page previously mapped by iemMemPageMap.
3234 *
3235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3236 * @param GCPhysMem The physical address.
3237 * @param fAccess The intended access.
3238 * @param pvMem What iemMemPageMap returned.
3239 * @param pLock The PGM lock.
3240 */
3241DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
3242 const void *pvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
3243{
3244 NOREF(pVCpu);
3245 NOREF(GCPhysMem);
3246 NOREF(fAccess);
3247 NOREF(pvMem);
3248 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
3249}
3250
3251#ifdef IEM_WITH_SETJMP
3252
3253/** @todo slim this down */
3254DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg,
3255 size_t cbMem, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
3256{
3257 Assert(cbMem >= 1);
3258 Assert(iSegReg < X86_SREG_COUNT);
3259
3260 /*
3261 * 64-bit mode is simpler.
3262 */
3263 if (IEM_IS_64BIT_CODE(pVCpu))
3264 {
3265 if (iSegReg >= X86_SREG_FS && iSegReg != UINT8_MAX)
3266 {
3267 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3268 PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
3269 GCPtrMem += pSel->u64Base;
3270 }
3271
3272 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
3273 return GCPtrMem;
3274 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
3275 }
3276 /*
3277 * 16-bit and 32-bit segmentation.
3278 */
3279 else if (iSegReg != UINT8_MAX)
3280 {
3281 /** @todo Does this apply to segments with 4G-1 limit? */
3282 uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
3283 if (RT_LIKELY(GCPtrLast32 >= (uint32_t)GCPtrMem))
3284 {
3285 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3286 PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
3287 switch (pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
3288 | X86_SEL_TYPE_READ | X86_SEL_TYPE_WRITE /* same as read */
3289 | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_CONF /* same as down */
3290 | X86_SEL_TYPE_CODE))
3291 {
3292 case X86DESCATTR_P: /* readonly data, expand up */
3293 case X86DESCATTR_P | X86_SEL_TYPE_WRITE: /* writable data, expand up */
3294 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ: /* code, read-only */
3295 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_CONF: /* conforming code, read-only */
3296 /* expand up */
3297 if (RT_LIKELY(GCPtrLast32 <= pSel->u32Limit))
3298 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
3299 Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x vs %#x\n",
3300 (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit));
3301 break;
3302
3303 case X86DESCATTR_P | X86_SEL_TYPE_DOWN: /* readonly data, expand down */
3304 case X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_WRITE: /* writable data, expand down */
3305 /* expand down */
3306 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
3307 && ( pSel->Attr.n.u1DefBig
3308 || GCPtrLast32 <= UINT32_C(0xffff)) ))
3309 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
3310 Log10(("iemMemApplySegmentToReadJmp: expand down out of bounds %#x..%#x vs %#x..%#x\n",
3311 (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit, pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT16_MAX));
3312 break;
3313
3314 default:
3315 Log10(("iemMemApplySegmentToReadJmp: bad selector %#x\n", pSel->Attr.u));
3316 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
3317 break;
3318 }
3319 }
3320 Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x\n", (uint32_t)GCPtrMem, GCPtrLast32));
3321 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
3322 }
3323 /*
3324 * 32-bit flat address.
3325 */
3326 else
3327 return GCPtrMem;
3328}
3329
3330
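/*
 * Standalone sketch of the limit arithmetic used for 16-bit and 32-bit data
 * segments (it mirrors the checks in iemMemApplySegmentToReadJmp above).
 * Illustrative only; the helper name and parameters are made up, and fBig
 * stands in for the descriptor's B/DefBig bit.
 */
#if 0
static bool iemExampleIsWithinDataSegLimit(uint32_t off, uint32_t cb, uint32_t uLimit, bool fExpandDown, bool fBig)
{
    uint32_t const offLast = off + cb - 1;
    if (offLast < off)                  /* 32-bit wrap-around is always out of bounds. */
        return false;
    if (!fExpandDown)
        return offLast <= uLimit;       /* expand up: valid range is [0, limit]. */
    /* expand down: valid range is (limit, 0xffff] or (limit, 0xffffffff] depending on the B bit. */
    return off > uLimit && (fBig || offLast <= UINT32_C(0xffff));
}
#endif
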
3331/** @todo slim this down */
3332DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem,
3333 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
3334{
3335 Assert(cbMem >= 1);
3336 Assert(iSegReg < X86_SREG_COUNT);
3337
3338 /*
3339 * 64-bit mode is simpler.
3340 */
3341 if (IEM_IS_64BIT_CODE(pVCpu))
3342 {
3343 if (iSegReg >= X86_SREG_FS)
3344 {
3345 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3346 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
3347 GCPtrMem += pSel->u64Base;
3348 }
3349
3350 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
3351 return GCPtrMem;
3352 }
3353 /*
3354 * 16-bit and 32-bit segmentation.
3355 */
3356 else
3357 {
3358 Assert(GCPtrMem <= UINT32_MAX);
3359 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3360 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
3361 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
3362 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
3363 if ( fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE) /* data, expand up */
3364 /** @todo explore exactly how the CS stuff works in real mode. See also
3365 * http://www.rcollins.org/Productivity/DescriptorCache.html and
3366 * http://www.rcollins.org/ddj/Aug98/Aug98.html for some insight. */
3367 || (iSegReg == X86_SREG_CS && IEM_IS_REAL_OR_V86_MODE(pVCpu)) ) /* Ignored for CS. */ /** @todo testcase! */
3368 {
3369 /* expand up */
3370 uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
3371 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
3372 && GCPtrLast32 >= (uint32_t)GCPtrMem))
3373 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
3374 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
3375 }
3376 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
3377 {
3378 /* expand down - the upper boundary is defined by the B bit, not G. */
3379 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
3380 if (RT_LIKELY( (uint32_t)GCPtrMem >= pSel->u32Limit
3381 && (pSel->Attr.n.u1DefBig || GCPtrLast32 <= UINT32_C(0xffff))
3382 && GCPtrLast32 >= (uint32_t)GCPtrMem))
3383 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
3384 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
3385 }
3386 else
3387 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
3388 }
3389 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
3390}
3391
3392#endif /* IEM_WITH_SETJMP */
3393
3394/**
3395 * Fakes a long mode stack selector for SS = 0.
3396 *
3397 * @param pDescSs Where to return the fake stack descriptor.
3398 * @param uDpl The DPL we want.
3399 */
3400DECLINLINE(void) iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl) RT_NOEXCEPT
3401{
3402 pDescSs->Long.au64[0] = 0;
3403 pDescSs->Long.au64[1] = 0;
3404 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
3405 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
3406 pDescSs->Long.Gen.u2Dpl = uDpl;
3407 pDescSs->Long.Gen.u1Present = 1;
3408 pDescSs->Long.Gen.u1Long = 1;
3409}
3410
3411/*
3412 * Instantiate R/W inline templates.
3413 */
3414#define TMPL_MEM_TYPE uint8_t
3415#define TMPL_MEM_TYPE_ALIGN 0
3416#define TMPL_MEM_TYPE_SIZE 1
3417#define TMPL_MEM_FN_SUFF U8
3418#define TMPL_MEM_FMT_TYPE "%#04x"
3419#define TMPL_MEM_FMT_DESC "byte"
3420#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
3421
3422#define TMPL_MEM_TYPE uint16_t
3423#define TMPL_MEM_TYPE_ALIGN 1
3424#define TMPL_MEM_TYPE_SIZE 2
3425#define TMPL_MEM_FN_SUFF U16
3426#define TMPL_MEM_FMT_TYPE "%#06x"
3427#define TMPL_MEM_FMT_DESC "word"
3428#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
3429
3430#define TMPL_MEM_TYPE uint32_t
3431#define TMPL_MEM_TYPE_ALIGN 3
3432#define TMPL_MEM_TYPE_SIZE 4
3433#define TMPL_MEM_FN_SUFF U32
3434#define TMPL_MEM_FMT_TYPE "%#010x"
3435#define TMPL_MEM_FMT_DESC "dword"
3436#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
3437
3438#define TMPL_MEM_TYPE uint64_t
3439#define TMPL_MEM_TYPE_ALIGN 7
3440#define TMPL_MEM_TYPE_SIZE 8
3441#define TMPL_MEM_FN_SUFF U64
3442#define TMPL_MEM_FMT_TYPE "%#018RX64"
3443#define TMPL_MEM_FMT_DESC "qword"
3444#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
3445
3446#define TMPL_MEM_NO_STORE
3447#define TMPL_MEM_NO_MAPPING
3448#define TMPL_MEM_TYPE uint64_t
3449#define TMPL_MEM_TYPE_ALIGN 15
3450#define TMPL_MEM_TYPE_SIZE 8
3451#define TMPL_MEM_FN_SUFF U64AlignedU128
3452#define TMPL_MEM_FMT_TYPE "%#018RX64"
3453#define TMPL_MEM_FMT_DESC "qword"
3454#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
3455
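/*
 * Note: TMPL_MEM_TYPE_ALIGN above is the natural-alignment mask (size - 1),
 * with 15 for the U64AlignedU128 variant that requires 16-byte alignment.
 * A typical use of such a mask is sketched below (illustrative only, not
 * necessarily what the template generates; the helper name is made up).
 */
#if 0
static bool iemExampleIsAligned(RTGCPTR GCPtrMem, uint32_t fAlignMask)
{
    /* fAlignMask = 0, 1, 3, 7 or 15; a zero result means the access is naturally aligned. */
    return (GCPtrMem & fAlignMask) == 0;
}
#endif
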
3456/** @} */
3457
3458
3459#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3460
3461/**
3462 * Gets CR0 fixed-0 bits in VMX operation.
3463 *
3464 * We do this rather than fetching what we report to the guest (in the
3465 * IA32_VMX_CR0_FIXED0 MSR) because real hardware, like us, reports the same
3466 * values regardless of whether the unrestricted-guest feature is available on the CPU.
3467 *
3468 * @returns CR0 fixed-0 bits.
3469 * @param pVCpu The cross context virtual CPU structure.
3470 * @param fVmxNonRootMode Whether the CR0 fixed-0 bits for VMX non-root mode
3471 * must be returned. When @c false, the CR0 fixed-0
3472 * bits for VMX root mode are returned.
3473 *
3474 */
3475DECLINLINE(uint64_t) iemVmxGetCr0Fixed0(PCVMCPUCC pVCpu, bool fVmxNonRootMode) RT_NOEXCEPT
3476{
3477 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
3478
3479 PCVMXMSRS pMsrs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs;
3480 if ( fVmxNonRootMode
3481 && (pMsrs->ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST))
3482 return VMX_V_CR0_FIXED0_UX;
3483 return VMX_V_CR0_FIXED0;
3484}
3485
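/*
 * A sketch of how a caller might use iemVmxGetCr0Fixed0: in VMX operation all
 * CR0 fixed-0 bits must be 1 for a CR0 value to be valid. Illustrative only;
 * the helper name is made up and this is not the actual IEM check.
 */
#if 0
static bool iemExampleIsCr0ValidInVmx(PCVMCPUCC pVCpu, uint64_t uCr0, bool fVmxNonRootMode)
{
    uint64_t const fCr0Fixed0 = iemVmxGetCr0Fixed0(pVCpu, fVmxNonRootMode);
    return (uCr0 & fCr0Fixed0) == fCr0Fixed0;
}
#endif
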
3486
3487/**
3488 * Sets virtual-APIC write emulation as pending.
3489 *
3490 * @param pVCpu The cross context virtual CPU structure.
3491 * @param offApic The offset in the virtual-APIC page that was written.
3492 */
3493DECLINLINE(void) iemVmxVirtApicSetPendingWrite(PVMCPUCC pVCpu, uint16_t offApic) RT_NOEXCEPT
3494{
3495 Assert(offApic < XAPIC_OFF_END + 4);
3496
3497 /*
3498 * Record the currently updated APIC offset, as we need this later for figuring
3499 * out whether to perform TPR, EOI or self-IPI virtualization as well
3500 * as for supplying the exit qualification when causing an APIC-write VM-exit.
3501 */
3502 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offApic;
3503
3504 /*
3505 * Flag that we need to perform virtual-APIC write emulation (TPR/PPR/EOI/Self-IPI
3506 * virtualization or APIC-write emulation).
3507 */
3508 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3509 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
3510}
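
/*
 * Typical call-site sketch: after emulating a guest write to the virtual-APIC
 * page, record the offset so the pending write gets processed. Illustrative
 * only; XAPIC_OFF_TPR (the TPR register offset) is assumed to be defined
 * elsewhere and the helper name is made up.
 */
#if 0
static void iemExampleRecordVirtApicTprWrite(PVMCPUCC pVCpu)
{
    /* ... the write to the virtual-APIC page has already been performed ... */
    iemVmxVirtApicSetPendingWrite(pVCpu, XAPIC_OFF_TPR); /* records the offset and raises VMCPU_FF_VMX_APIC_WRITE */
}
#endif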
3511
3512#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3513
3514#endif /* !VMM_INCLUDED_SRC_include_IEMInline_h */