VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInline.h@100052

Last change on this file since 100052 was 100052, checked in by vboxsync, on 2023-06-02

VMM/IEM: Refactored the enmCpuMode, uCpl, fBypassHandlers, fDisregardLock and fPendingInstruction* IEMCPU members into a single fExec member and associated IEM_F_XXX flag defines. Added more flags needed for recompiled execution. The fExec value is maintained as code is executed, so it does not need to be recalculated in the instruction loops. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 109.1 KB
1/* $Id: IEMInline.h 100052 2023-06-02 14:49:14Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Inlined Functions.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMInline_h
29#define VMM_INCLUDED_SRC_include_IEMInline_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34
35
36/**
37 * Makes status code adjustments (pass up from I/O and access handlers)
38 * as well as maintaining statistics.
39 *
40 * @returns Strict VBox status code to pass up.
41 * @param pVCpu The cross context virtual CPU structure of the calling thread.
42 * @param rcStrict The status from executing an instruction.
43 */
44DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
45{
46 if (rcStrict != VINF_SUCCESS)
47 {
48 if (RT_SUCCESS(rcStrict))
49 {
50 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
51 || rcStrict == VINF_IOM_R3_IOPORT_READ
52 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
53 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
54 || rcStrict == VINF_IOM_R3_MMIO_READ
55 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
56 || rcStrict == VINF_IOM_R3_MMIO_WRITE
57 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
58 || rcStrict == VINF_CPUM_R3_MSR_READ
59 || rcStrict == VINF_CPUM_R3_MSR_WRITE
60 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
61 || rcStrict == VINF_EM_RAW_TO_R3
62 || rcStrict == VINF_EM_TRIPLE_FAULT
63 || rcStrict == VINF_GIM_R3_HYPERCALL
64 /* raw-mode / virt handlers only: */
65 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
66 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
67 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
68 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
69 || rcStrict == VINF_SELM_SYNC_GDT
70 || rcStrict == VINF_CSAM_PENDING_ACTION
71 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
72 /* nested hw.virt codes: */
73 || rcStrict == VINF_VMX_VMEXIT
74 || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
75 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
76 || rcStrict == VINF_SVM_VMEXIT
77 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
78/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
79 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
80#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
81 if ( rcStrict == VINF_VMX_VMEXIT
82 && rcPassUp == VINF_SUCCESS)
83 rcStrict = VINF_SUCCESS;
84 else
85#endif
86#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
87 if ( rcStrict == VINF_SVM_VMEXIT
88 && rcPassUp == VINF_SUCCESS)
89 rcStrict = VINF_SUCCESS;
90 else
91#endif
92 if (rcPassUp == VINF_SUCCESS)
93 pVCpu->iem.s.cRetInfStatuses++;
94 else if ( rcPassUp < VINF_EM_FIRST
95 || rcPassUp > VINF_EM_LAST
96 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
97 {
98 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
99 pVCpu->iem.s.cRetPassUpStatus++;
100 rcStrict = rcPassUp;
101 }
102 else
103 {
104 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
105 pVCpu->iem.s.cRetInfStatuses++;
106 }
107 }
108 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
109 pVCpu->iem.s.cRetAspectNotImplemented++;
110 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
111 pVCpu->iem.s.cRetInstrNotImplemented++;
112 else
113 pVCpu->iem.s.cRetErrStatuses++;
114 }
115 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
116 {
117 pVCpu->iem.s.cRetPassUpStatus++;
118 rcStrict = pVCpu->iem.s.rcPassUp;
119 }
120
121 return rcStrict;
122}
123
124
125/**
126 * Sets the pass up status.
127 *
128 * @returns VINF_SUCCESS.
129 * @param pVCpu The cross context virtual CPU structure of the
130 * calling thread.
131 * @param rcPassUp The pass up status. Must be informational.
132 * VINF_SUCCESS is not allowed.
133 */
134DECLINLINE(int) iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp) RT_NOEXCEPT
135{
136 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
137
138 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
139 if (rcOldPassUp == VINF_SUCCESS)
140 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
141 /* If both are EM scheduling codes, use EM priority rules. */
142 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
143 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
144 {
145 if (rcPassUp < rcOldPassUp)
146 {
147 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
148 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
149 }
150 else
151 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
152 }
153 /* Override EM scheduling with specific status code. */
154 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
155 {
156 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
157 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
158 }
159 /* Don't override specific status code, first come first served. */
160 else
161 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
162 return VINF_SUCCESS;
163}
164
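/** Usage sketch (hypothetical caller; the worker name below is made up): an
 * instruction implementation that gets an informational status back from a
 * helper can park it with iemSetPassUpStatus() and carry on, letting
 * iemExecStatusCodeFiddling() above fold IEMCPU::rcPassUp into the final status.
 * @code
 *   VBOXSTRICTRC rcStrict2 = iemSomeHypotheticalIoWorker(pVCpu);
 *   if (rcStrict2 != VINF_SUCCESS && RT_SUCCESS(rcStrict2))
 *       rcStrict2 = iemSetPassUpStatus(pVCpu, rcStrict2); // returns VINF_SUCCESS; rcPassUp remembers the rest
 * @endcode
 */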
165
166/**
167 * Calculates the IEM_F_MODE_X86_32BIT_FLAT flag.
168 *
169 * Checks if CS, SS, DS and ES are all wide open flat 32-bit segments. This will
170 * reject expand-down data segments and conforming code segments.
171 *
172 * ASSUMES that the CPU is in 32-bit mode.
173 *
174 * @returns IEM_F_MODE_X86_32BIT_FLAT or zero.
175 * @param pVCpu The cross context virtual CPU structure of the
176 * calling thread.
177 * @sa iemCalc32BitFlatIndicatorEsDs
178 */
179DECL_FORCE_INLINE(uint32_t) iemCalc32BitFlatIndicator(PVMCPUCC pVCpu) RT_NOEXCEPT
180{
181 AssertCompile(X86_SEL_TYPE_DOWN == X86_SEL_TYPE_CONF);
182 return ( ( pVCpu->cpum.GstCtx.es.Attr.u
183 | pVCpu->cpum.GstCtx.cs.Attr.u
184 | pVCpu->cpum.GstCtx.ss.Attr.u
185 | pVCpu->cpum.GstCtx.ds.Attr.u)
186 & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_DOWN | X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P))
187 == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_DOWN | X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P)
188 && ( (pVCpu->cpum.GstCtx.es.u32Limit + 1)
189 | (pVCpu->cpum.GstCtx.cs.u32Limit + 1)
190 | (pVCpu->cpum.GstCtx.ss.u32Limit + 1)
191 | (pVCpu->cpum.GstCtx.ds.u32Limit + 1))
192 == 0
193 && ( pVCpu->cpum.GstCtx.es.u64Base
194 | pVCpu->cpum.GstCtx.cs.u64Base
195 | pVCpu->cpum.GstCtx.ss.u64Base
196 | pVCpu->cpum.GstCtx.ds.u64Base)
197 == 0
198 && !(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS))
199 ? IEM_F_MODE_X86_32BIT_FLAT : 0;
200}
201
202
203/**
204 * Calculates the IEM_F_MODE_X86_32BIT_FLAT flag, ASSUMING the CS and SS are
205 * flat already.
206 *
207 * This is used by sysenter.
208 *
209 * @returns IEM_F_MODE_X86_32BIT_FLAT or zero.
210 * @param pVCpu The cross context virtual CPU structure of the
211 * calling thread.
212 * @sa iemCalc32BitFlatIndicator
213 */
214DECL_FORCE_INLINE(uint32_t) iemCalc32BitFlatIndicatorEsDs(PVMCPUCC pVCpu) RT_NOEXCEPT
215{
216 AssertCompile(X86_SEL_TYPE_DOWN == X86_SEL_TYPE_CONF);
217 return ( ( pVCpu->cpum.GstCtx.es.Attr.u
218 | pVCpu->cpum.GstCtx.ds.Attr.u)
219 & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_DOWN | X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P))
220 == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_DOWN | X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P)
221 && ( (pVCpu->cpum.GstCtx.es.u32Limit + 1)
222 | (pVCpu->cpum.GstCtx.ds.u32Limit + 1))
223 == 0
224 && ( pVCpu->cpum.GstCtx.es.u64Base
225 | pVCpu->cpum.GstCtx.ds.u64Base)
226 == 0
227 && !(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS))
228 ? IEM_F_MODE_X86_32BIT_FLAT : 0;
229}
230
231
232/**
233 * Calculates the IEM_F_MODE_XXX and CPL flags.
234 *
235 * @returns IEM_F_MODE_XXX
236 * @param pVCpu The cross context virtual CPU structure of the
237 * calling thread.
238 */
239DECL_FORCE_INLINE(uint32_t) iemCalcExecModeAndCplFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
240{
241 /*
242 * We duplicate code from CPUMGetGuestCPL and CPUMIsGuestIn64BitCodeEx
243 * here to try to get this done as efficiently as possible.
244 */
245 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
246
247 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
248 {
249 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
250 {
251 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
252 uint32_t fExec = ((uint32_t)pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl << IEM_F_X86_CPL_SHIFT);
253 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig)
254 {
255 Assert(!pVCpu->cpum.GstCtx.cs.Attr.n.u1Long || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA));
256 fExec |= IEM_F_MODE_X86_32BIT_PROT | iemCalc32BitFlatIndicator(pVCpu);
257 }
258 else if ( pVCpu->cpum.GstCtx.cs.Attr.n.u1Long
259 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA))
260 fExec |= IEM_F_MODE_X86_64BIT;
261 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
262 fExec |= IEM_F_MODE_X86_16BIT_PROT;
263 else
264 fExec |= IEM_F_MODE_X86_16BIT_PROT_PRE_386;
265 return fExec;
266 }
267 return IEM_F_MODE_X86_16BIT_PROT_V86 | (UINT32_C(3) << IEM_F_X86_CPL_SHIFT);
268 }
269
270 /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
271 if (RT_LIKELY(!pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig))
272 {
273 if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
274 return IEM_F_MODE_X86_16BIT;
275 return IEM_F_MODE_X86_16BIT_PRE_386;
276 }
277
278 /* 32-bit unreal mode. */
279 return IEM_F_MODE_X86_32BIT | iemCalc32BitFlatIndicator(pVCpu);
280}
281
282
283/**
284 * Calculates the AMD-V and VT-x related context flags.
285 *
286 * @returns 0 or a combination of IEM_F_X86_CTX_IN_GUEST, IEM_F_X86_CTX_SVM and
287 * IEM_F_X86_CTX_VMX.
288 * @param pVCpu The cross context virtual CPU structure of the
289 * calling thread.
290 */
291DECL_FORCE_INLINE(uint32_t) iemCalcExecHwVirtFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
292{
293 /*
294 * This duplicates code from CPUMIsGuestVmxEnabled, CPUMIsGuestSvmEnabled
295 * and CPUMIsGuestInNestedHwvirtMode to some extent.
296 */
297 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
298
299 AssertCompile(X86_CR4_VMXE != MSR_K6_EFER_SVME);
300 uint64_t const fTmp = (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VMXE)
301 | (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SVME);
302 if (RT_LIKELY(!fTmp))
303 return 0; /* likely */
304
305 if (fTmp & X86_CR4_VMXE)
306 {
307 Assert(pVCpu->cpum.GstCtx.hwvirt.enmHwvirt == CPUMHWVIRT_VMX);
308 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode)
309 return IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST;
310 return IEM_F_X86_CTX_VMX;
311 }
312
313 Assert(pVCpu->cpum.GstCtx.hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
314 if (pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN)
315 return IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST;
316 return IEM_F_X86_CTX_SVM;
317}
318
319
320/**
321 * Calculates the IEM_F_PENDING_BRK_XXX (IEM_F_PENDING_BRK_MASK) flags.
322 *
323 * @returns IEM_F_PENDING_BRK_XXX or zero.
324 * @param pVCpu The cross context virtual CPU structure of the
325 * calling thread.
326 */
327DECL_FORCE_INLINE(uint32_t) iemCalcExecDbgFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
328{
329 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
330
331 if (RT_LIKELY( !(pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)
332 && pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledHwBreakpoints == 0))
333 return 0;
334 return iemCalcExecDbgFlagsSlow(pVCpu);
335}
336
337/**
338 * Calculates the IEM_F_XXX flags.
339 *
340 * @returns IEM_F_XXX combination matching the current CPU state.
341 * @param pVCpu The cross context virtual CPU structure of the
342 * calling thread.
343 */
344DECL_FORCE_INLINE(uint32_t) iemCalcExecFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
345{
346 return iemCalcExecModeAndCplFlags(pVCpu)
347 | iemCalcExecHwVirtFlags(pVCpu)
348 /* SMM is not yet implemented */
349 | iemCalcExecDbgFlags(pVCpu)
350 ;
351}
352
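/** Usage sketch (hypothetical caller): the result is normally stored in
 * IEMCPU::fExec (see iemInitExec() below) and individual parts are read back
 * with the IEM_F_XXX masks, e.g. the current privilege level:
 * @code
 *   pVCpu->iem.s.fExec = iemCalcExecFlags(pVCpu);
 *   uint8_t const uCpl = (uint8_t)((pVCpu->iem.s.fExec & IEM_F_X86_CPL_MASK) >> IEM_F_X86_CPL_SHIFT);
 * @endcode
 */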
353
354/**
355 * Re-calculates the MODE and CPL parts of IEMCPU::fExec.
356 *
357 * @param pVCpu The cross context virtual CPU structure of the
358 * calling thread.
359 */
360DECL_FORCE_INLINE(void) iemRecalcExecModeAndCplFlags(PVMCPUCC pVCpu)
361{
362 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
363 | iemCalcExecModeAndCplFlags(pVCpu);
364}
365
366
367/**
368 * Re-calculates the IEM_F_PENDING_BRK_MASK part of IEMCPU::fExec.
369 *
370 * @param pVCpu The cross context virtual CPU structure of the
371 * calling thread.
372 */
373DECL_FORCE_INLINE(void) iemRecalcExecDbgFlags(PVMCPUCC pVCpu)
374{
375 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_PENDING_BRK_MASK)
376 | iemCalcExecDbgFlags(pVCpu);
377}
378
379
380#ifndef IEM_WITH_OPAQUE_DECODER_STATE
381
382# if defined(VBOX_INCLUDED_vmm_dbgf_h) || defined(DOXYGEN_RUNNING) /* dbgf.ro.cEnabledHwBreakpoints */
383/**
384 * Initializes the execution state.
385 *
386 * @param pVCpu The cross context virtual CPU structure of the
387 * calling thread.
388 * @param fExecOpts Optional execution flags:
389 * - IEM_F_BYPASS_HANDLERS
390 * - IEM_F_X86_DISREGARD_LOCK
391 *
392 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
393 * side-effects in strict builds.
394 */
395DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
396{
397 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
398 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
399 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
400 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
401 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
402 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
403 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
404 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
405 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
406 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
407
408 pVCpu->iem.s.fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
409# ifdef VBOX_STRICT
410 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
411 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
412 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
413 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
414 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
415 pVCpu->iem.s.uRexReg = 127;
416 pVCpu->iem.s.uRexB = 127;
417 pVCpu->iem.s.offModRm = 127;
418 pVCpu->iem.s.uRexIndex = 127;
419 pVCpu->iem.s.iEffSeg = 127;
420 pVCpu->iem.s.idxPrefix = 127;
421 pVCpu->iem.s.uVex3rdReg = 127;
422 pVCpu->iem.s.uVexLength = 127;
423 pVCpu->iem.s.fEvexStuff = 127;
424 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
425# ifdef IEM_WITH_CODE_TLB
426 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
427 pVCpu->iem.s.pbInstrBuf = NULL;
428 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
429 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
430 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
431 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
432# else
433 pVCpu->iem.s.offOpcode = 127;
434 pVCpu->iem.s.cbOpcode = 127;
435# endif
436# endif /* VBOX_STRICT */
437
438 pVCpu->iem.s.cActiveMappings = 0;
439 pVCpu->iem.s.iNextMapping = 0;
440 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
441}
442# endif /* VBOX_INCLUDED_vmm_dbgf_h */
443
444
445# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
446/**
447 * Performs a minimal reinitialization of the execution state.
448 *
449 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
450 * 'world-switch' type operations on the CPU. Currently only nested
451 * hardware-virtualization uses it.
452 *
453 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
454 * @param cbInstr The instruction length (for flushing).
455 */
456DECLINLINE(void) iemReInitExec(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
457{
458 pVCpu->iem.s.fExec = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
459 iemOpcodeFlushHeavy(pVCpu, cbInstr);
460}
461# endif
462
463
464/**
465 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
466 *
467 * @param pVCpu The cross context virtual CPU structure of the
468 * calling thread.
469 */
470DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu) RT_NOEXCEPT
471{
472 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
473# ifdef VBOX_STRICT
474# ifdef IEM_WITH_CODE_TLB
475 NOREF(pVCpu);
476# else
477 pVCpu->iem.s.cbOpcode = 0;
478# endif
479# else
480 NOREF(pVCpu);
481# endif
482}
483
484
485/**
486 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
487 *
488 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
489 *
490 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
492 * @param rcStrict The status code to fiddle.
493 */
494DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
495{
496 iemUninitExec(pVCpu);
497 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
498}
499
500
501/**
502 * Macro used by the IEMExec* methods to check the given instruction length.
503 *
504 * Will return on failure!
505 *
506 * @param a_cbInstr The given instruction length.
507 * @param a_cbMin The minimum length.
508 */
509# define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
510 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
511 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
512
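/** Usage sketch (hypothetical IEMExec* method; the function name is made up):
 * @code
 *   static VBOXSTRICTRC iemExecSomethingHypothetical(PVMCPUCC pVCpu, uint8_t cbInstr)
 *   {
 *       IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1); // bails out with VERR_IEM_INVALID_INSTR_LENGTH
 *       return VINF_SUCCESS;
 *   }
 * @endcode
 */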
513
514# ifndef IEM_WITH_SETJMP
515
516/**
517 * Fetches the first opcode byte.
518 *
519 * @returns Strict VBox status code.
520 * @param pVCpu The cross context virtual CPU structure of the
521 * calling thread.
522 * @param pu8 Where to return the opcode byte.
523 */
524DECLINLINE(VBOXSTRICTRC) iemOpcodeGetFirstU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
525{
526 /*
527 * Check for hardware instruction breakpoints.
528 */
529 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_INSTR)))
530 { /* likely */ }
531 else
532 {
533 VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
534 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
535 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
536 { /* likely */ }
537 else if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
538 return iemRaiseDebugException(pVCpu);
539 else
540 return rcStrict;
541 }
542
543 /*
544 * Fetch the first opcode byte.
545 */
546 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
547 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
548 {
549 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
550 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
551 return VINF_SUCCESS;
552 }
553 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
554}
555
556# else /* IEM_WITH_SETJMP */
557
558/**
559 * Fetches the first opcode byte, longjmp on error.
560 *
561 * @returns The opcode byte.
562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
563 */
564DECL_INLINE_THROW(uint8_t) iemOpcodeGetFirstU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
565{
566 /*
567 * Check for hardware instruction breakpoints.
568 */
569 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_INSTR)))
570 { /* likely */ }
571 else
572 {
573 VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
574 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
575 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
576 { /* likely */ }
577 else
578 {
579 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
580 rcStrict = iemRaiseDebugException(pVCpu);
581 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
582 }
583 }
584
585 /*
586 * Fetch the first opcode byte.
587 */
588# ifdef IEM_WITH_CODE_TLB
589 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
590 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
591 if (RT_LIKELY( pbBuf != NULL
592 && offBuf < pVCpu->iem.s.cbInstrBuf))
593 {
594 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
595 return pbBuf[offBuf];
596 }
597# else
598 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
599 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
600 {
601 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
602 return pVCpu->iem.s.abOpcode[offOpcode];
603 }
604# endif
605 return iemOpcodeGetNextU8SlowJmp(pVCpu);
606}
607
608# endif /* IEM_WITH_SETJMP */
609
610/**
611 * Fetches the first opcode byte, returns/throws automatically on failure.
612 *
613 * @param a_pu8 Where to return the opcode byte.
614 * @remark Implicitly references pVCpu.
615 */
616# ifndef IEM_WITH_SETJMP
617# define IEM_OPCODE_GET_FIRST_U8(a_pu8) \
618 do \
619 { \
620 VBOXSTRICTRC rcStrict2 = iemOpcodeGetFirstU8(pVCpu, (a_pu8)); \
621 if (rcStrict2 == VINF_SUCCESS) \
622 { /* likely */ } \
623 else \
624 return rcStrict2; \
625 } while (0)
626# else
627# define IEM_OPCODE_GET_FIRST_U8(a_pu8) (*(a_pu8) = iemOpcodeGetFirstU8Jmp(pVCpu))
628# endif /* IEM_WITH_SETJMP */
629
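/** Usage sketch (hypothetical decoder fragment): the fetch macros hide the
 * status-return vs. longjmp difference, so the same decoder code works in
 * both configurations:
 * @code
 *   uint8_t bOpcode;
 *   IEM_OPCODE_GET_FIRST_U8(&bOpcode); // returns or longjmps on fetch failure
 * @endcode
 */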
630
631# ifndef IEM_WITH_SETJMP
632
633/**
634 * Fetches the next opcode byte.
635 *
636 * @returns Strict VBox status code.
637 * @param pVCpu The cross context virtual CPU structure of the
638 * calling thread.
639 * @param pu8 Where to return the opcode byte.
640 */
641DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
642{
643 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
644 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
645 {
646 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
647 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
648 return VINF_SUCCESS;
649 }
650 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
651}
652
653# else /* IEM_WITH_SETJMP */
654
655/**
656 * Fetches the next opcode byte, longjmp on error.
657 *
658 * @returns The opcode byte.
659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
660 */
661DECL_INLINE_THROW(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
662{
663# ifdef IEM_WITH_CODE_TLB
664 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
665 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
666 if (RT_LIKELY( pbBuf != NULL
667 && offBuf < pVCpu->iem.s.cbInstrBuf))
668 {
669 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
670 return pbBuf[offBuf];
671 }
672# else
673 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
674 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
675 {
676 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
677 return pVCpu->iem.s.abOpcode[offOpcode];
678 }
679# endif
680 return iemOpcodeGetNextU8SlowJmp(pVCpu);
681}
682
683# endif /* IEM_WITH_SETJMP */
684
685/**
686 * Fetches the next opcode byte, returns automatically on failure.
687 *
688 * @param a_pu8 Where to return the opcode byte.
689 * @remark Implicitly references pVCpu.
690 */
691# ifndef IEM_WITH_SETJMP
692# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
693 do \
694 { \
695 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
696 if (rcStrict2 == VINF_SUCCESS) \
697 { /* likely */ } \
698 else \
699 return rcStrict2; \
700 } while (0)
701# else
702# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
703# endif /* IEM_WITH_SETJMP */
704
705
706# ifndef IEM_WITH_SETJMP
707/**
708 * Fetches the next signed byte from the opcode stream.
709 *
710 * @returns Strict VBox status code.
711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
712 * @param pi8 Where to return the signed byte.
713 */
714DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8) RT_NOEXCEPT
715{
716 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
717}
718# endif /* !IEM_WITH_SETJMP */
719
720
721/**
722 * Fetches the next signed byte from the opcode stream, returning automatically
723 * on failure.
724 *
725 * @param a_pi8 Where to return the signed byte.
726 * @remark Implicitly references pVCpu.
727 */
728# ifndef IEM_WITH_SETJMP
729# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
730 do \
731 { \
732 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
733 if (rcStrict2 != VINF_SUCCESS) \
734 return rcStrict2; \
735 } while (0)
736# else /* IEM_WITH_SETJMP */
737# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
738
739# endif /* IEM_WITH_SETJMP */
740
741
742# ifndef IEM_WITH_SETJMP
743/**
744 * Fetches the next signed byte from the opcode stream, extending it to
745 * unsigned 16-bit.
746 *
747 * @returns Strict VBox status code.
748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
749 * @param pu16 Where to return the unsigned word.
750 */
751DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
752{
753 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
754 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
755 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
756
757 *pu16 = (uint16_t)(int16_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
758 pVCpu->iem.s.offOpcode = offOpcode + 1;
759 return VINF_SUCCESS;
760}
761# endif /* !IEM_WITH_SETJMP */
762
763/**
764 * Fetches the next signed byte from the opcode stream, sign-extends it to a
765 * word, and returns automatically on failure.
766 *
767 * @param a_pu16 Where to return the word.
768 * @remark Implicitly references pVCpu.
769 */
770# ifndef IEM_WITH_SETJMP
771# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
772 do \
773 { \
774 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
775 if (rcStrict2 != VINF_SUCCESS) \
776 return rcStrict2; \
777 } while (0)
778# else
779# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (uint16_t)(int16_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
780# endif
781
782# ifndef IEM_WITH_SETJMP
783/**
784 * Fetches the next signed byte from the opcode stream, extending it to
785 * unsigned 32-bit.
786 *
787 * @returns Strict VBox status code.
788 * @param pVCpu The cross context virtual CPU structure of the calling thread.
789 * @param pu32 Where to return the unsigned dword.
790 */
791DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
792{
793 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
794 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
795 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
796
797 *pu32 = (uint32_t)(int32_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
798 pVCpu->iem.s.offOpcode = offOpcode + 1;
799 return VINF_SUCCESS;
800}
801# endif /* !IEM_WITH_SETJMP */
802
803/**
804 * Fetches the next signed byte from the opcode stream, sign-extends it to a
805 * double word, and returns automatically on failure.
806 *
807 * @param a_pu32 Where to return the double word.
808 * @remark Implicitly references pVCpu.
809 */
810# ifndef IEM_WITH_SETJMP
811# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
812 do \
813 { \
814 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
815 if (rcStrict2 != VINF_SUCCESS) \
816 return rcStrict2; \
817 } while (0)
818# else
819# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (uint32_t)(int32_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
820# endif
821
822
823# ifndef IEM_WITH_SETJMP
824/**
825 * Fetches the next signed byte from the opcode stream, extending it to
826 * unsigned 64-bit.
827 *
828 * @returns Strict VBox status code.
829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
830 * @param pu64 Where to return the unsigned qword.
831 */
832DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
833{
834 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
835 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
836 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
837
838 *pu64 = (uint64_t)(int64_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
839 pVCpu->iem.s.offOpcode = offOpcode + 1;
840 return VINF_SUCCESS;
841}
842# endif /* !IEM_WITH_SETJMP */
843
844/**
845 * Fetches the next signed byte from the opcode stream, sign-extends it to a
846 * quad word, and returns automatically on failure.
847 *
848 * @param a_pu64 Where to return the quad word.
849 * @remark Implicitly references pVCpu.
850 */
851# ifndef IEM_WITH_SETJMP
852# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
853 do \
854 { \
855 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
856 if (rcStrict2 != VINF_SUCCESS) \
857 return rcStrict2; \
858 } while (0)
859# else
860# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
861# endif
862
863
864# ifndef IEM_WITH_SETJMP
865/**
866 * Fetches the next opcode byte, which is a ModR/M byte, recording its offset.
867 *
868 * @returns Strict VBox status code.
869 * @param pVCpu The cross context virtual CPU structure of the
870 * calling thread.
871 * @param pu8 Where to return the opcode byte.
872 */
873DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
874{
875 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
876 pVCpu->iem.s.offModRm = offOpcode;
877 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
878 {
879 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
880 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
881 return VINF_SUCCESS;
882 }
883 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
884}
885# else /* IEM_WITH_SETJMP */
886/**
887 * Fetches the next opcode byte, which is a ModR/M byte, recording its offset; longjmp on error.
888 *
889 * @returns The opcode byte.
890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
891 */
892DECL_INLINE_THROW(uint8_t) iemOpcodeGetNextRmJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
893{
894# ifdef IEM_WITH_CODE_TLB
895 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
896 pVCpu->iem.s.offModRm = offBuf;
897 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
898 if (RT_LIKELY( pbBuf != NULL
899 && offBuf < pVCpu->iem.s.cbInstrBuf))
900 {
901 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
902 return pbBuf[offBuf];
903 }
904# else
905 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
906 pVCpu->iem.s.offModRm = offOpcode;
907 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
908 {
909 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
910 return pVCpu->iem.s.abOpcode[offOpcode];
911 }
912# endif
913 return iemOpcodeGetNextU8SlowJmp(pVCpu);
914}
915# endif /* IEM_WITH_SETJMP */
916
917/**
918 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
919 * on failure.
920 *
921 * Will note down the position of the ModR/M byte for VT-x exits.
922 *
923 * @param a_pbRm Where to return the RM opcode byte.
924 * @remark Implicitly references pVCpu.
925 */
926# ifndef IEM_WITH_SETJMP
927# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
928 do \
929 { \
930 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
931 if (rcStrict2 == VINF_SUCCESS) \
932 { /* likely */ } \
933 else \
934 return rcStrict2; \
935 } while (0)
936# else
937# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
938# endif /* IEM_WITH_SETJMP */
939
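/** Usage sketch (hypothetical decoder fragment): ModR/M bytes are fetched via
 * this macro rather than IEM_OPCODE_GET_NEXT_U8 so that IEMCPU::offModRm gets
 * recorded for VT-x exit information decoding:
 * @code
 *   uint8_t bRm;
 *   IEM_OPCODE_GET_NEXT_RM(&bRm); // the decoder then splits bRm into mod/reg/r-m fields
 * @endcode
 */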
940
941# ifndef IEM_WITH_SETJMP
942
943/**
944 * Fetches the next opcode word.
945 *
946 * @returns Strict VBox status code.
947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
948 * @param pu16 Where to return the opcode word.
949 */
950DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
951{
952 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
953 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
954 {
955 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
956# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
957 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
958# else
959 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
960# endif
961 return VINF_SUCCESS;
962 }
963 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
964}
965
966# else /* IEM_WITH_SETJMP */
967
968/**
969 * Fetches the next opcode word, longjmp on error.
970 *
971 * @returns The opcode word.
972 * @param pVCpu The cross context virtual CPU structure of the calling thread.
973 */
974DECL_INLINE_THROW(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
975{
976# ifdef IEM_WITH_CODE_TLB
977 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
978 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
979 if (RT_LIKELY( pbBuf != NULL
980 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
981 {
982 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
983# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
984 return *(uint16_t const *)&pbBuf[offBuf];
985# else
986 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
987# endif
988 }
989# else /* !IEM_WITH_CODE_TLB */
990 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
991 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
992 {
993 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
994# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
995 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
996# else
997 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
998# endif
999 }
1000# endif /* !IEM_WITH_CODE_TLB */
1001 return iemOpcodeGetNextU16SlowJmp(pVCpu);
1002}
1003
1004# endif /* IEM_WITH_SETJMP */
1005
1006/**
1007 * Fetches the next opcode word, returns automatically on failure.
1008 *
1009 * @param a_pu16 Where to return the opcode word.
1010 * @remark Implicitly references pVCpu.
1011 */
1012# ifndef IEM_WITH_SETJMP
1013# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1014 do \
1015 { \
1016 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
1017 if (rcStrict2 != VINF_SUCCESS) \
1018 return rcStrict2; \
1019 } while (0)
1020# else
1021# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
1022# endif
1023
1024# ifndef IEM_WITH_SETJMP
1025/**
1026 * Fetches the next opcode word, zero extending it to a double word.
1027 *
1028 * @returns Strict VBox status code.
1029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1030 * @param pu32 Where to return the opcode double word.
1031 */
1032DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1033{
1034 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1035 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
1036 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
1037
1038 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1039 pVCpu->iem.s.offOpcode = offOpcode + 2;
1040 return VINF_SUCCESS;
1041}
1042# endif /* !IEM_WITH_SETJMP */
1043
1044/**
1045 * Fetches the next opcode word and zero extends it to a double word, returns
1046 * automatically on failure.
1047 *
1048 * @param a_pu32 Where to return the opcode double word.
1049 * @remark Implicitly references pVCpu.
1050 */
1051# ifndef IEM_WITH_SETJMP
1052# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1053 do \
1054 { \
1055 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
1056 if (rcStrict2 != VINF_SUCCESS) \
1057 return rcStrict2; \
1058 } while (0)
1059# else
1060# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
1061# endif
1062
1063# ifndef IEM_WITH_SETJMP
1064/**
1065 * Fetches the next opcode word, zero extending it to a quad word.
1066 *
1067 * @returns Strict VBox status code.
1068 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1069 * @param pu64 Where to return the opcode quad word.
1070 */
1071DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1072{
1073 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1074 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
1075 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
1076
1077 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1078 pVCpu->iem.s.offOpcode = offOpcode + 2;
1079 return VINF_SUCCESS;
1080}
1081# endif /* !IEM_WITH_SETJMP */
1082
1083/**
1084 * Fetches the next opcode word and zero extends it to a quad word, returns
1085 * automatically on failure.
1086 *
1087 * @param a_pu64 Where to return the opcode quad word.
1088 * @remark Implicitly references pVCpu.
1089 */
1090# ifndef IEM_WITH_SETJMP
1091# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1092 do \
1093 { \
1094 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
1095 if (rcStrict2 != VINF_SUCCESS) \
1096 return rcStrict2; \
1097 } while (0)
1098# else
1099# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
1100# endif
1101
1102
1103# ifndef IEM_WITH_SETJMP
1104/**
1105 * Fetches the next signed word from the opcode stream.
1106 *
1107 * @returns Strict VBox status code.
1108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1109 * @param pi16 Where to return the signed word.
1110 */
1111DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16) RT_NOEXCEPT
1112{
1113 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
1114}
1115# endif /* !IEM_WITH_SETJMP */
1116
1117
1118/**
1119 * Fetches the next signed word from the opcode stream, returning automatically
1120 * on failure.
1121 *
1122 * @param a_pi16 Where to return the signed word.
1123 * @remark Implicitly references pVCpu.
1124 */
1125# ifndef IEM_WITH_SETJMP
1126# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1127 do \
1128 { \
1129 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
1130 if (rcStrict2 != VINF_SUCCESS) \
1131 return rcStrict2; \
1132 } while (0)
1133# else
1134# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
1135# endif
1136
1137# ifndef IEM_WITH_SETJMP
1138
1139/**
1140 * Fetches the next opcode dword.
1141 *
1142 * @returns Strict VBox status code.
1143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1144 * @param pu32 Where to return the opcode double word.
1145 */
1146DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1147{
1148 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1149 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
1150 {
1151 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
1152# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1153 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1154# else
1155 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1156 pVCpu->iem.s.abOpcode[offOpcode + 1],
1157 pVCpu->iem.s.abOpcode[offOpcode + 2],
1158 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1159# endif
1160 return VINF_SUCCESS;
1161 }
1162 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
1163}
1164
1165# else /* IEM_WITH_SETJMP */
1166
1167/**
1168 * Fetches the next opcode dword, longjmp on error.
1169 *
1170 * @returns The opcode dword.
1171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1172 */
1173DECL_INLINE_THROW(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1174{
1175# ifdef IEM_WITH_CODE_TLB
1176 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1177 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1178 if (RT_LIKELY( pbBuf != NULL
1179 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
1180 {
1181 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
1182# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1183 return *(uint32_t const *)&pbBuf[offBuf];
1184# else
1185 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
1186 pbBuf[offBuf + 1],
1187 pbBuf[offBuf + 2],
1188 pbBuf[offBuf + 3]);
1189# endif
1190 }
1191# else
1192 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1193 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
1194 {
1195 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
1196# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1197 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1198# else
1199 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1200 pVCpu->iem.s.abOpcode[offOpcode + 1],
1201 pVCpu->iem.s.abOpcode[offOpcode + 2],
1202 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1203# endif
1204 }
1205# endif
1206 return iemOpcodeGetNextU32SlowJmp(pVCpu);
1207}
1208
1209# endif /* IEM_WITH_SETJMP */
1210
1211/**
1212 * Fetches the next opcode dword, returns automatically on failure.
1213 *
1214 * @param a_pu32 Where to return the opcode dword.
1215 * @remark Implicitly references pVCpu.
1216 */
1217# ifndef IEM_WITH_SETJMP
1218# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1219 do \
1220 { \
1221 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
1222 if (rcStrict2 != VINF_SUCCESS) \
1223 return rcStrict2; \
1224 } while (0)
1225# else
1226# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
1227# endif
1228
1229# ifndef IEM_WITH_SETJMP
1230/**
1231 * Fetches the next opcode dword, zero extending it to a quad word.
1232 *
1233 * @returns Strict VBox status code.
1234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1235 * @param pu64 Where to return the opcode quad word.
1236 */
1237DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1238{
1239 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1240 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
1241 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
1242
1243 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1244 pVCpu->iem.s.abOpcode[offOpcode + 1],
1245 pVCpu->iem.s.abOpcode[offOpcode + 2],
1246 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1247 pVCpu->iem.s.offOpcode = offOpcode + 4;
1248 return VINF_SUCCESS;
1249}
1250# endif /* !IEM_WITH_SETJMP */
1251
1252/**
1253 * Fetches the next opcode dword and zero extends it to a quad word, returns
1254 * automatically on failure.
1255 *
1256 * @param a_pu64 Where to return the opcode quad word.
1257 * @remark Implicitly references pVCpu.
1258 */
1259# ifndef IEM_WITH_SETJMP
1260# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1261 do \
1262 { \
1263 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
1264 if (rcStrict2 != VINF_SUCCESS) \
1265 return rcStrict2; \
1266 } while (0)
1267# else
1268# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
1269# endif
1270
1271
1272# ifndef IEM_WITH_SETJMP
1273/**
1274 * Fetches the next signed double word from the opcode stream.
1275 *
1276 * @returns Strict VBox status code.
1277 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1278 * @param pi32 Where to return the signed double word.
1279 */
1280DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32) RT_NOEXCEPT
1281{
1282 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
1283}
1284# endif
1285
1286/**
1287 * Fetches the next signed double word from the opcode stream, returning
1288 * automatically on failure.
1289 *
1290 * @param a_pi32 Where to return the signed double word.
1291 * @remark Implicitly references pVCpu.
1292 */
1293# ifndef IEM_WITH_SETJMP
1294# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1295 do \
1296 { \
1297 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
1298 if (rcStrict2 != VINF_SUCCESS) \
1299 return rcStrict2; \
1300 } while (0)
1301# else
1302# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
1303# endif
1304
1305# ifndef IEM_WITH_SETJMP
1306/**
1307 * Fetches the next opcode dword, sign extending it into a quad word.
1308 *
1309 * @returns Strict VBox status code.
1310 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1311 * @param pu64 Where to return the opcode quad word.
1312 */
1313DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1314{
1315 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1316 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
1317 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
1318
1319 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1320 pVCpu->iem.s.abOpcode[offOpcode + 1],
1321 pVCpu->iem.s.abOpcode[offOpcode + 2],
1322 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1323 *pu64 = (uint64_t)(int64_t)i32;
1324 pVCpu->iem.s.offOpcode = offOpcode + 4;
1325 return VINF_SUCCESS;
1326}
1327# endif /* !IEM_WITH_SETJMP */
1328
1329/**
1330 * Fetches the next opcode double word and sign extends it to a quad word,
1331 * returns automatically on failure.
1332 *
1333 * @param a_pu64 Where to return the opcode quad word.
1334 * @remark Implicitly references pVCpu.
1335 */
1336# ifndef IEM_WITH_SETJMP
1337# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1338 do \
1339 { \
1340 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
1341 if (rcStrict2 != VINF_SUCCESS) \
1342 return rcStrict2; \
1343 } while (0)
1344# else
1345# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
1346# endif
1347
1348# ifndef IEM_WITH_SETJMP
1349
1350/**
1351 * Fetches the next opcode qword.
1352 *
1353 * @returns Strict VBox status code.
1354 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1355 * @param pu64 Where to return the opcode qword.
1356 */
1357DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1358{
1359 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1360 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
1361 {
1362# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1363 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1364# else
1365 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1366 pVCpu->iem.s.abOpcode[offOpcode + 1],
1367 pVCpu->iem.s.abOpcode[offOpcode + 2],
1368 pVCpu->iem.s.abOpcode[offOpcode + 3],
1369 pVCpu->iem.s.abOpcode[offOpcode + 4],
1370 pVCpu->iem.s.abOpcode[offOpcode + 5],
1371 pVCpu->iem.s.abOpcode[offOpcode + 6],
1372 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1373# endif
1374 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
1375 return VINF_SUCCESS;
1376 }
1377 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
1378}
1379
1380# else /* IEM_WITH_SETJMP */
1381
1382/**
1383 * Fetches the next opcode qword, longjmp on error.
1384 *
1385 * @returns The opcode qword.
1386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1387 */
1388DECL_INLINE_THROW(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1389{
1390# ifdef IEM_WITH_CODE_TLB
1391 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1392 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1393 if (RT_LIKELY( pbBuf != NULL
1394 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
1395 {
1396 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
1397# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1398 return *(uint64_t const *)&pbBuf[offBuf];
1399# else
1400 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
1401 pbBuf[offBuf + 1],
1402 pbBuf[offBuf + 2],
1403 pbBuf[offBuf + 3],
1404 pbBuf[offBuf + 4],
1405 pbBuf[offBuf + 5],
1406 pbBuf[offBuf + 6],
1407 pbBuf[offBuf + 7]);
1408# endif
1409 }
1410# else
1411 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1412 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
1413 {
1414 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
1415# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1416 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1417# else
1418 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1419 pVCpu->iem.s.abOpcode[offOpcode + 1],
1420 pVCpu->iem.s.abOpcode[offOpcode + 2],
1421 pVCpu->iem.s.abOpcode[offOpcode + 3],
1422 pVCpu->iem.s.abOpcode[offOpcode + 4],
1423 pVCpu->iem.s.abOpcode[offOpcode + 5],
1424 pVCpu->iem.s.abOpcode[offOpcode + 6],
1425 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1426# endif
1427 }
1428# endif
1429 return iemOpcodeGetNextU64SlowJmp(pVCpu);
1430}
1431
1432# endif /* IEM_WITH_SETJMP */
1433
1434/**
1435 * Fetches the next opcode quad word, returns automatically on failure.
1436 *
1437 * @param a_pu64 Where to return the opcode quad word.
1438 * @remark Implicitly references pVCpu.
1439 */
1440# ifndef IEM_WITH_SETJMP
1441# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1442 do \
1443 { \
1444 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
1445 if (rcStrict2 != VINF_SUCCESS) \
1446 return rcStrict2; \
1447 } while (0)
1448# else
1449# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
1450# endif
1451
1452#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */
1453
1454
1455/** @name Misc Worker Functions.
1456 * @{
1457 */
1458
1459/**
1460 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1461 * not (kind of obsolete now).
1462 *
1463 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1464 */
1465#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
1466
1467/**
1468 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
1469 *
1470 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1471 * @param a_fEfl The new EFLAGS.
1472 */
1473#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
1474
1475
1476/**
1477 * Loads a NULL data selector into a selector register, both the hidden and
1478 * visible parts, in protected mode.
1479 *
1480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1481 * @param pSReg Pointer to the segment register.
1482 * @param uRpl The RPL.
1483 */
1484DECLINLINE(void) iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl) RT_NOEXCEPT
1485{
1486 /** @todo Testcase: write a testcase checking what happens when loading a NULL
1487 * data selector in protected mode. */
1488 pSReg->Sel = uRpl;
1489 pSReg->ValidSel = uRpl;
1490 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1491 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1492 {
1493 /* VT-x (Intel 3960x) observed doing something like this. */
1494 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (IEM_GET_CPL(pVCpu) << X86DESCATTR_DPL_SHIFT);
1495 pSReg->u32Limit = UINT32_MAX;
1496 pSReg->u64Base = 0;
1497 }
1498 else
1499 {
1500 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
1501 pSReg->u32Limit = 0;
1502 pSReg->u64Base = 0;
1503 }
1504}
1505
1506/** @} */
1507
1508
1509/*
1510 *
1511 * Helper routines.
1512 * Helper routines.
1513 * Helper routines.
1514 *
1515 */
1516
1517#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1518
1519/**
1520 * Recalculates the effective operand size.
1521 *
1522 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1523 */
1524DECLINLINE(void) iemRecalEffOpSize(PVMCPUCC pVCpu) RT_NOEXCEPT
1525{
1526 switch (IEM_GET_CPU_MODE(pVCpu))
1527 {
1528 case IEMMODE_16BIT:
1529 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
1530 break;
1531 case IEMMODE_32BIT:
1532 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
1533 break;
1534 case IEMMODE_64BIT:
1535 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
1536 {
1537 case 0:
1538 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
1539 break;
1540 case IEM_OP_PRF_SIZE_OP:
1541 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1542 break;
1543 case IEM_OP_PRF_SIZE_REX_W:
1544 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
1545 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1546 break;
1547 }
1548 break;
1549 default:
1550 AssertFailed();
1551 }
1552}
1553
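/** Usage sketch (hypothetical decoder fragment): prefix bytes accumulate in
 * IEMCPU::fPrefixes and the effective operand size is recalculated afterwards,
 * e.g. for a 66h operand-size prefix:
 * @code
 *   pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_SIZE_OP;
 *   iemRecalEffOpSize(pVCpu); // 16-bit code -> 32-bit operands; 32-bit and 64-bit code -> 16-bit operands
 * @endcode
 */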
1554
1555/**
1556 * Sets the default operand size to 64-bit and recalculates the effective
1557 * operand size.
1558 *
1559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1560 */
1561DECLINLINE(void) iemRecalEffOpSize64Default(PVMCPUCC pVCpu) RT_NOEXCEPT
1562{
1563 Assert(IEM_IS_64BIT_CODE(pVCpu));
1564 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
1565 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
1566 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1567 else
1568 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1569}
1570
1571
1572/**
1573 * Sets the default operand size to 64-bit and recalculates the effective
1574 * operand size, with Intel ignoring any operand size prefix (AMD respects it).
1575 *
1576 * This is for the relative jumps.
1577 *
1578 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1579 */
1580DECLINLINE(void) iemRecalEffOpSize64DefaultAndIntelIgnoresOpSizePrefix(PVMCPUCC pVCpu) RT_NOEXCEPT
1581{
1582 Assert(IEM_IS_64BIT_CODE(pVCpu));
1583 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
1584 if ( (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP
1585 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
1586 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1587 else
1588 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1589}
1590
1591#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */
1592
1593
1594
1595/** @name Register Access.
1596 * @{
1597 */
1598
1599/**
1600 * Gets a reference (pointer) to the specified hidden segment register.
1601 *
1602 * @returns Hidden register reference.
1603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1604 * @param iSegReg The segment register.
1605 */
1606DECLINLINE(PCPUMSELREG) iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1607{
1608 Assert(iSegReg < X86_SREG_COUNT);
1609 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1610 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1611
1612 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
1613 return pSReg;
1614}
1615
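/** Usage sketch (hypothetical caller): reading the hidden CS base via the
 * accessor:
 * @code
 *   uint64_t const uCsBase = iemSRegGetHid(pVCpu, X86_SREG_CS)->u64Base;
 * @endcode
 */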
1616
1617/**
1618 * Ensures that the given hidden segment register is up to date.
1619 *
1620 * @returns Hidden register reference.
1621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1622 * @param pSReg The segment register.
1623 */
1624DECLINLINE(PCPUMSELREG) iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg) RT_NOEXCEPT
1625{
1626 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
1627 NOREF(pVCpu);
1628 return pSReg;
1629}
1630
1631
1632/**
1633 * Gets a reference (pointer) to the specified segment register (the selector
1634 * value).
1635 *
1636 * @returns Pointer to the selector variable.
1637 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1638 * @param iSegReg The segment register.
1639 */
1640DECLINLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1641{
1642 Assert(iSegReg < X86_SREG_COUNT);
1643 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1644 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
1645}
1646
1647
1648/**
1649 * Fetches the selector value of a segment register.
1650 *
1651 * @returns The selector value.
1652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1653 * @param iSegReg The segment register.
1654 */
1655DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1656{
1657 Assert(iSegReg < X86_SREG_COUNT);
1658 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1659 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
1660}
1661
1662
1663/**
1664 * Fetches the base address value of a segment register.
1665 *
1666 * @returns The base address value.
1667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1668 * @param iSegReg The segment register.
1669 */
1670DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1671{
1672 Assert(iSegReg < X86_SREG_COUNT);
1673 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1674 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
1675}
1676
1677
1678/**
1679 * Gets a reference (pointer) to the specified general purpose register.
1680 *
1681 * @returns Register reference.
1682 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1683 * @param iReg The general purpose register.
1684 */
1685DECLINLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1686{
1687 Assert(iReg < 16);
1688 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
1689}
1690
1691
1692#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1693/**
1694 * Gets a reference (pointer) to the specified 8-bit general purpose register.
1695 *
1696 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
1697 *
1698 * @returns Register reference.
1699 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1700 * @param iReg The register.
1701 */
1702DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1703{
1704 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
1705 {
1706 Assert(iReg < 16);
1707 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
1708 }
1709 /* high 8-bit register. */
1710 Assert(iReg < 8);
1711 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
1712}
1713#endif
1714
1715
1716/**
1717 * Gets a reference (pointer) to the specified 8-bit general purpose register,
1718 * alternative version with extended (20) register index.
1719 *
1720 * @returns Register reference.
1721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1722 * @param iRegEx The register. The first 16 are regular ones,
1723 * whereas 16 thru 19 map to AH, CH, DH and BH.
1724 */
1725DECLINLINE(uint8_t *) iemGRegRefU8Ex(PVMCPUCC pVCpu, uint8_t iRegEx) RT_NOEXCEPT
1726{
1727 if (iRegEx < 16)
1728 return &pVCpu->cpum.GstCtx.aGRegs[iRegEx].u8;
1729
1730 /* high 8-bit register. */
1731 Assert(iRegEx < 20);
1732 return &pVCpu->cpum.GstCtx.aGRegs[iRegEx & 3].bHi;
1733}
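
/*
 * Usage sketch (illustrative only, hypothetical call sites): indexes 0..15 pick
 * the low byte of the 16 general purpose registers, while 16..19 pick the legacy
 * high-byte registers via the (iRegEx & 3) masking above:
 *
 *      uint8_t *pbAl = iemGRegRefU8Ex(pVCpu, 0);   // AL - aGRegs[0].u8
 *      uint8_t *pbAh = iemGRegRefU8Ex(pVCpu, 16);  // AH - aGRegs[16 & 3].bHi = aGRegs[0].bHi
 *      uint8_t *pbBh = iemGRegRefU8Ex(pVCpu, 19);  // BH - aGRegs[19 & 3].bHi = aGRegs[3].bHi
 */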
1734
1735
1736/**
1737 * Gets a reference (pointer) to the specified 16-bit general purpose register.
1738 *
1739 * @returns Register reference.
1740 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1741 * @param iReg The register.
1742 */
1743DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1744{
1745 Assert(iReg < 16);
1746 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
1747}
1748
1749
1750/**
1751 * Gets a reference (pointer) to the specified 32-bit general purpose register.
1752 *
1753 * @returns Register reference.
1754 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1755 * @param iReg The register.
1756 */
1757DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1758{
1759 Assert(iReg < 16);
1760 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1761}
1762
1763
1764/**
1765 * Gets a reference (pointer) to the specified signed 32-bit general purpose register.
1766 *
1767 * @returns Register reference.
1768 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1769 * @param iReg The register.
1770 */
1771DECLINLINE(int32_t *) iemGRegRefI32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1772{
1773 Assert(iReg < 16);
1774 return (int32_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1775}
1776
1777
1778/**
1779 * Gets a reference (pointer) to the specified 64-bit general purpose register.
1780 *
1781 * @returns Register reference.
1782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1783 * @param iReg The register.
1784 */
1785DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1786{
1787 Assert(iReg < 16);
1788 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1789}
1790
1791
1792/**
1793 * Gets a reference (pointer) to the specified signed 64-bit general purpose register.
1794 *
1795 * @returns Register reference.
1796 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1797 * @param iReg The register.
1798 */
1799DECLINLINE(int64_t *) iemGRegRefI64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1800{
1801 Assert(iReg < 16);
1802 return (int64_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1803}
1804
1805
1806/**
1807 * Gets a reference (pointer) to the specified segment register's base address.
1808 *
1809 * @returns Segment register base address reference.
1810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1811 * @param iSegReg The segment selector.
1812 */
1813DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1814{
1815 Assert(iSegReg < X86_SREG_COUNT);
1816 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1817 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
1818}
1819
1820
1821#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1822/**
1823 * Fetches the value of an 8-bit general purpose register.
1824 *
1825 * @returns The register value.
1826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1827 * @param iReg The register.
1828 */
1829DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1830{
1831 return *iemGRegRefU8(pVCpu, iReg);
1832}
1833#endif
1834
1835
1836/**
1837 * Fetches the value of an 8-bit general purpose register, alternative version
1838 * with extended (20) register index.
1839 *
1840 * @returns The register value.
1841 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1842 * @param iRegEx The register. The first 16 are regular ones,
1843 * whereas 16 thru 19 map to AH, CH, DH and BH.
1844 */
1845DECLINLINE(uint8_t) iemGRegFetchU8Ex(PVMCPUCC pVCpu, uint8_t iRegEx) RT_NOEXCEPT
1846{
1847 return *iemGRegRefU8Ex(pVCpu, iRegEx);
1848}
1849
1850
1851/**
1852 * Fetches the value of a 16-bit general purpose register.
1853 *
1854 * @returns The register value.
1855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1856 * @param iReg The register.
1857 */
1858DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1859{
1860 Assert(iReg < 16);
1861 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
1862}
1863
1864
1865/**
1866 * Fetches the value of a 32-bit general purpose register.
1867 *
1868 * @returns The register value.
1869 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1870 * @param iReg The register.
1871 */
1872DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1873{
1874 Assert(iReg < 16);
1875 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1876}
1877
1878
1879/**
1880 * Fetches the value of a 64-bit general purpose register.
1881 *
1882 * @returns The register value.
1883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1884 * @param iReg The register.
1885 */
1886DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1887{
1888 Assert(iReg < 16);
1889 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1890}
1891
1892
1893/**
1894 * Get the address of the top of the stack.
1895 *
1896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1897 */
1898DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu) RT_NOEXCEPT
1899{
1900 if (IEM_IS_64BIT_CODE(pVCpu))
1901 return pVCpu->cpum.GstCtx.rsp;
1902 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1903 return pVCpu->cpum.GstCtx.esp;
1904 return pVCpu->cpum.GstCtx.sp;
1905}
1906
1907
1908/**
1909 * Updates the RIP/EIP/IP to point to the next instruction.
1910 *
1911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1912 * @param cbInstr The number of bytes to add.
1913 */
1914DECL_FORCE_INLINE(void) iemRegAddToRip(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
1915{
1916 /*
1917 * Advance RIP.
1918 *
1919 * When we're targeting 8086/8, 80186/8 or 80286 mode, the updates are 16-bit,
1920 * while in all other modes except LM64 the updates are 32-bit. This means
1921 * we need to watch for both 32-bit and 16-bit "carry" situations, i.e.
1922 * 4GB and 64KB rollovers, and decide whether anything needs masking.
1923 *
1924 * See PC wrap around tests in bs3-cpu-weird-1.
1925 */
1926 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
1927 uint64_t const uRipNext = uRipPrev + cbInstr;
1928 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & (RT_BIT_64(32) | RT_BIT_64(16)))
1929 || IEM_IS_64BIT_CODE(pVCpu)))
1930 pVCpu->cpum.GstCtx.rip = uRipNext;
1931 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
1932 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
1933 else
1934 pVCpu->cpum.GstCtx.rip = (uint16_t)uRipNext;
1935}
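
/*
 * Worked example (illustrative only, hypothetical values): on a pre-386 target
 * CPU a 3 byte instruction at IP=0xfffe wraps within the 64KB segment:
 *
 *      uRipPrev = 0xfffe; uRipNext = 0x10001;          // bit 16 toggles -> masking needed
 *      pVCpu->cpum.GstCtx.rip = (uint16_t)uRipNext;    // = 0x0001
 *
 * On 386+ targets outside long mode the same logic instead truncates at the 4GB mark.
 */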
1936
1937
1938/**
1939 * Updates the EIP/IP to point to the next instruction - only for 32-bit and
1940 * 16-bit code.
1941 *
1942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1943 * @param cbInstr The number of bytes to add.
1944 */
1945DECL_FORCE_INLINE(void) iemRegAddToEip32(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
1946{
1947 /* See comment in iemRegAddToRip. */
1948 uint32_t const uEipPrev = pVCpu->cpum.GstCtx.eip;
1949 uint32_t const uEipNext = uEipPrev + cbInstr;
1950 if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
1951 pVCpu->cpum.GstCtx.rip = (uint32_t)uEipNext;
1952 else
1953 pVCpu->cpum.GstCtx.rip = (uint16_t)uEipNext;
1954}
1955
1956
1957/**
1958 * Called by iemRegAddToRipAndFinishingClearingRF and others when any of the
1959 * following EFLAGS bits are set:
1960 * - X86_EFL_RF - clear it.
1961 * - CPUMCTX_INHIBIT_SHADOW (_SS/_STI) - clear them.
1962 * - X86_EFL_TF - generate single step \#DB trap.
1963 * - CPUMCTX_DBG_HIT_DR0/1/2/3 - generate \#DB trap (data or I/O, not
1964 * instruction).
1965 *
1966 * According to @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events},
1967 * a \#DB due to TF (single stepping) or a DRx non-instruction breakpoint
1968 * takes priority over both NMIs and hardware interrupts. So, neither is
1969 * considered here. (The RESET, \#MC, SMI, INIT, STOPCLK and FLUSH events are
1970 * either unsupported or will be triggered on top of any \#DB raised here.)
1971 *
1972 * The RF flag only needs to be cleared here as it only suppresses instruction
1973 * breakpoints which are not raised here (happens synchronously during
1974 * instruction fetching).
1975 *
1976 * The CPUMCTX_INHIBIT_SHADOW_SS flag will be cleared by this function, so its
1977 * status has no bearing on whether \#DB exceptions are raised.
1978 *
1979 * @note This must *NOT* be called by the two instructions setting the
1980 * CPUMCTX_INHIBIT_SHADOW_SS flag.
1981 *
1982 * @see @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events}
1983 * @see @sdmv3{077,200,6.8.3,Masking Exceptions and Interrupts When Switching
1984 * Stacks}
1985 */
1986static VBOXSTRICTRC iemFinishInstructionWithFlagsSet(PVMCPUCC pVCpu) RT_NOEXCEPT
1987{
1988 /*
1989 * Normally we're just here to clear RF and/or interrupt shadow bits.
1990 */
1991 if (RT_LIKELY((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) == 0))
1992 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
1993 else
1994 {
1995 /*
1996 * Raise a #DB or/and DBGF event.
1997 */
1998 VBOXSTRICTRC rcStrict;
1999 if (pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK))
2000 {
2001 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2002 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
2003 if (pVCpu->cpum.GstCtx.eflags.uBoth & X86_EFL_TF)
2004 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS;
2005 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK) >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2006 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64\n",
2007 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
2008 pVCpu->cpum.GstCtx.rflags.uBoth));
2009
2010 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK);
2011 rcStrict = iemRaiseDebugException(pVCpu);
2012
2013 /* A DBGF event/breakpoint trumps the iemRaiseDebugException informational status code. */
2014 if ((pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK) && RT_FAILURE(rcStrict))
2015 {
2016 rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
2017 LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
2018 }
2019 }
2020 else
2021 {
2022 Assert(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK);
2023 rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
2024 LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
2025 }
2026 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_DBG_DBGF_MASK;
2027 return rcStrict;
2028 }
2029 return VINF_SUCCESS;
2030}
2031
2032
2033/**
2034 * Clears the RF and CPUMCTX_INHIBIT_SHADOW, triggering \#DB if pending.
2035 *
2036 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2037 */
2038DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegFinishClearingRF(PVMCPUCC pVCpu) RT_NOEXCEPT
2039{
2040 /*
2041 * We assume that most of the time nothing actually needs doing here.
2042 */
2043 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
2044 if (RT_LIKELY(!( pVCpu->cpum.GstCtx.eflags.uBoth
2045 & (X86_EFL_TF | X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) ))
2046 return VINF_SUCCESS;
2047 return iemFinishInstructionWithFlagsSet(pVCpu);
2048}
2049
2050
2051/**
2052 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF
2053 * and CPUMCTX_INHIBIT_SHADOW.
2054 *
2055 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2056 * @param cbInstr The number of bytes to add.
2057 */
2058DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2059{
2060 iemRegAddToRip(pVCpu, cbInstr);
2061 return iemRegFinishClearingRF(pVCpu);
2062}
2063
2064
2065/**
2066 * Updates the RIP to point to the next instruction and clears EFLAGS.RF
2067 * and CPUMCTX_INHIBIT_SHADOW.
2068 *
2069 * Only called from 64-bit code.
2070 *
2071 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2072 * @param cbInstr The number of bytes to add.
2073 */
2074DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRip64AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2075{
2076 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rip + cbInstr;
2077 return iemRegFinishClearingRF(pVCpu);
2078}
2079
2080
2081/**
2082 * Updates the EIP to point to the next instruction and clears EFLAGS.RF and
2083 * CPUMCTX_INHIBIT_SHADOW.
2084 *
2085 * This is never called from 64-bit code.
2086 *
2087 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2088 * @param cbInstr The number of bytes to add.
2089 */
2090DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToEip32AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2091{
2092 iemRegAddToEip32(pVCpu, cbInstr);
2093 return iemRegFinishClearingRF(pVCpu);
2094}
2095
2096
2097/**
2098 * Extended version of iemFinishInstructionWithFlagsSet that goes with
2099 * iemRegAddToRipAndFinishingClearingRfEx.
2100 *
2101 * See iemFinishInstructionWithFlagsSet() for details.
2102 */
2103static VBOXSTRICTRC iemFinishInstructionWithTfSet(PVMCPUCC pVCpu) RT_NOEXCEPT
2104{
2105 /*
2106 * Raise a #DB.
2107 */
2108 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2109 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
2110 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS
2111 | (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK) >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2112 /** @todo Do we set all pending \#DB events, or just one? */
2113 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64 (popf)\n",
2114 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
2115 pVCpu->cpum.GstCtx.rflags.uBoth));
2116 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
2117 return iemRaiseDebugException(pVCpu);
2118}
2119
2120
2121/**
2122 * Extended version of iemRegAddToRipAndFinishingClearingRF for use by POPF and
2123 * others potentially updating EFLAGS.TF.
2124 *
2125 * The single step event must be generated using the TF value at the start of
2126 * the instruction, not the new value set by it.
2127 *
2128 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2129 * @param cbInstr The number of bytes to add.
2130 * @param fEflOld The EFLAGS at the start of the instruction
2131 * execution.
2132 */
2133DECLINLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRfEx(PVMCPUCC pVCpu, uint8_t cbInstr, uint32_t fEflOld) RT_NOEXCEPT
2134{
2135 iemRegAddToRip(pVCpu, cbInstr);
2136 if (!(fEflOld & X86_EFL_TF))
2137 return iemRegFinishClearingRF(pVCpu);
2138 return iemFinishInstructionWithTfSet(pVCpu);
2139}
2140
2141
2142#ifndef IEM_WITH_OPAQUE_DECODER_STATE
2143/**
2144 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
2145 *
2146 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2147 */
2148DECLINLINE(VBOXSTRICTRC) iemRegUpdateRipAndFinishClearingRF(PVMCPUCC pVCpu) RT_NOEXCEPT
2149{
2150 return iemRegAddToRipAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
2151}
2152#endif
2153
2154
2155/**
2156 * Adds to the stack pointer.
2157 *
2158 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2159 * @param cbToAdd The number of bytes to add (8-bit!).
2160 */
2161DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd) RT_NOEXCEPT
2162{
2163 if (IEM_IS_64BIT_CODE(pVCpu))
2164 pVCpu->cpum.GstCtx.rsp += cbToAdd;
2165 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2166 pVCpu->cpum.GstCtx.esp += cbToAdd;
2167 else
2168 pVCpu->cpum.GstCtx.sp += cbToAdd;
2169}
2170
2171
2172/**
2173 * Subtracts from the stack pointer.
2174 *
2175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2176 * @param cbToSub The number of bytes to subtract (8-bit!).
2177 */
2178DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub) RT_NOEXCEPT
2179{
2180 if (IEM_IS_64BIT_CODE(pVCpu))
2181 pVCpu->cpum.GstCtx.rsp -= cbToSub;
2182 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2183 pVCpu->cpum.GstCtx.esp -= cbToSub;
2184 else
2185 pVCpu->cpum.GstCtx.sp -= cbToSub;
2186}
2187
2188
2189/**
2190 * Adds to the temporary stack pointer.
2191 *
2192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2193 * @param pTmpRsp The temporary SP/ESP/RSP to update.
2194 * @param cbToAdd The number of bytes to add (16-bit).
2195 */
2196DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd) RT_NOEXCEPT
2197{
2198 if (IEM_IS_64BIT_CODE(pVCpu))
2199 pTmpRsp->u += cbToAdd;
2200 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2201 pTmpRsp->DWords.dw0 += cbToAdd;
2202 else
2203 pTmpRsp->Words.w0 += cbToAdd;
2204}
2205
2206
2207/**
2208 * Subtracts from the temporary stack pointer.
2209 *
2210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2211 * @param pTmpRsp The temporary SP/ESP/RSP to update.
2212 * @param cbToSub The number of bytes to subtract.
2213 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
2214 * expecting that.
2215 */
2216DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub) RT_NOEXCEPT
2217{
2218 if (IEM_IS_64BIT_CODE(pVCpu))
2219 pTmpRsp->u -= cbToSub;
2220 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2221 pTmpRsp->DWords.dw0 -= cbToSub;
2222 else
2223 pTmpRsp->Words.w0 -= cbToSub;
2224}
2225
2226
2227/**
2228 * Calculates the effective stack address for a push of the specified size as
2229 * well as the new RSP value (upper bits may be masked).
2230 *
2231 * @returns Effective stack address for the push.
2232 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2233 * @param cbItem The size of the stack item to push.
2234 * @param puNewRsp Where to return the new RSP value.
2235 */
2236DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
2237{
2238 RTUINT64U uTmpRsp;
2239 RTGCPTR GCPtrTop;
2240 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
2241
2242 if (IEM_IS_64BIT_CODE(pVCpu))
2243 GCPtrTop = uTmpRsp.u -= cbItem;
2244 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2245 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
2246 else
2247 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
2248 *puNewRsp = uTmpRsp.u;
2249 return GCPtrTop;
2250}
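
/*
 * Illustrative example (hypothetical register values): outside 64-bit mode with
 * a 16-bit stack segment (SS.Attr.D=0) and RSP=0x0000000000010002, pushing a
 * 4 byte item only touches the low word:
 *
 *      GCPtrTop  = 0xfffe;                 // Words.w0: 0x0002 - 4 wraps to 0xfffe
 *      *puNewRsp = 0x000000000001fffe;     // the upper bits of RSP are left as-is
 */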
2251
2252
2253/**
2254 * Gets the current stack pointer and calculates the value after a pop of the
2255 * specified size.
2256 *
2257 * @returns Current stack pointer.
2258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2259 * @param cbItem The size of the stack item to pop.
2260 * @param puNewRsp Where to return the new RSP value.
2261 */
2262DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
2263{
2264 RTUINT64U uTmpRsp;
2265 RTGCPTR GCPtrTop;
2266 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
2267
2268 if (IEM_IS_64BIT_CODE(pVCpu))
2269 {
2270 GCPtrTop = uTmpRsp.u;
2271 uTmpRsp.u += cbItem;
2272 }
2273 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2274 {
2275 GCPtrTop = uTmpRsp.DWords.dw0;
2276 uTmpRsp.DWords.dw0 += cbItem;
2277 }
2278 else
2279 {
2280 GCPtrTop = uTmpRsp.Words.w0;
2281 uTmpRsp.Words.w0 += cbItem;
2282 }
2283 *puNewRsp = uTmpRsp.u;
2284 return GCPtrTop;
2285}
2286
2287
2288/**
2289 * Calculates the effective stack address for a push of the specified size as
2290 * well as the new temporary RSP value (upper bits may be masked).
2291 *
2292 * @returns Effective stack address for the push.
2293 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2294 * @param pTmpRsp The temporary stack pointer. This is updated.
2295 * @param cbItem The size of the stack item to push.
2296 */
2297DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
2298{
2299 RTGCPTR GCPtrTop;
2300
2301 if (IEM_IS_64BIT_CODE(pVCpu))
2302 GCPtrTop = pTmpRsp->u -= cbItem;
2303 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2304 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
2305 else
2306 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
2307 return GCPtrTop;
2308}
2309
2310
2311/**
2312 * Gets the effective stack address for a pop of the specified size and
2313 * calculates and updates the temporary RSP.
2314 *
2315 * @returns Current stack pointer.
2316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2317 * @param pTmpRsp The temporary stack pointer. This is updated.
2318 * @param cbItem The size of the stack item to pop.
2319 */
2320DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
2321{
2322 RTGCPTR GCPtrTop;
2323 if (IEM_IS_64BIT_CODE(pVCpu))
2324 {
2325 GCPtrTop = pTmpRsp->u;
2326 pTmpRsp->u += cbItem;
2327 }
2328 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2329 {
2330 GCPtrTop = pTmpRsp->DWords.dw0;
2331 pTmpRsp->DWords.dw0 += cbItem;
2332 }
2333 else
2334 {
2335 GCPtrTop = pTmpRsp->Words.w0;
2336 pTmpRsp->Words.w0 += cbItem;
2337 }
2338 return GCPtrTop;
2339}
2340
2341/** @} */
2342
2343
2344/** @name FPU access and helpers.
2345 *
2346 * @{
2347 */
2348
2349
2350/**
2351 * Hook for preparing to use the host FPU.
2352 *
2353 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2354 *
2355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2356 */
2357DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu) RT_NOEXCEPT
2358{
2359#ifdef IN_RING3
2360 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2361#else
2362 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
2363#endif
2364 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2365}
2366
2367
2368/**
2369 * Hook for preparing to use the host FPU for SSE.
2370 *
2371 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2372 *
2373 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2374 */
2375DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu) RT_NOEXCEPT
2376{
2377 iemFpuPrepareUsage(pVCpu);
2378}
2379
2380
2381/**
2382 * Hook for preparing to use the host FPU for AVX.
2383 *
2384 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2385 *
2386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2387 */
2388DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu) RT_NOEXCEPT
2389{
2390 iemFpuPrepareUsage(pVCpu);
2391}
2392
2393
2394/**
2395 * Hook for actualizing the guest FPU state before the interpreter reads it.
2396 *
2397 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2398 *
2399 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2400 */
2401DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2402{
2403#ifdef IN_RING3
2404 NOREF(pVCpu);
2405#else
2406 CPUMRZFpuStateActualizeForRead(pVCpu);
2407#endif
2408 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2409}
2410
2411
2412/**
2413 * Hook for actualizing the guest FPU state before the interpreter changes it.
2414 *
2415 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2416 *
2417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2418 */
2419DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2420{
2421#ifdef IN_RING3
2422 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2423#else
2424 CPUMRZFpuStateActualizeForChange(pVCpu);
2425#endif
2426 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2427}
2428
2429
2430/**
2431 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
2432 * only.
2433 *
2434 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2435 *
2436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2437 */
2438DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2439{
2440#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
2441 NOREF(pVCpu);
2442#else
2443 CPUMRZFpuStateActualizeSseForRead(pVCpu);
2444#endif
2445 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2446}
2447
2448
2449/**
2450 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
2451 * read+write.
2452 *
2453 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2454 *
2455 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2456 */
2457DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2458{
2459#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
2460 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2461#else
2462 CPUMRZFpuStateActualizeForChange(pVCpu);
2463#endif
2464 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2465
2466 /* Make sure any changes are loaded the next time around. */
2467 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_SSE;
2468}
2469
2470
2471/**
2472 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
2473 * only.
2474 *
2475 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2476 *
2477 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2478 */
2479DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2480{
2481#ifdef IN_RING3
2482 NOREF(pVCpu);
2483#else
2484 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
2485#endif
2486 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2487}
2488
2489
2490/**
2491 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
2492 * read+write.
2493 *
2494 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2495 *
2496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2497 */
2498DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2499{
2500#ifdef IN_RING3
2501 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2502#else
2503 CPUMRZFpuStateActualizeForChange(pVCpu);
2504#endif
2505 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2506
2507 /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
2508 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
2509}
2510
2511
2512/**
2513 * Stores a QNaN value into a FPU register.
2514 *
2515 * @param pReg Pointer to the register.
2516 */
2517DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg) RT_NOEXCEPT
2518{
2519 pReg->au32[0] = UINT32_C(0x00000000);
2520 pReg->au32[1] = UINT32_C(0xc0000000);
2521 pReg->au16[4] = UINT16_C(0xffff);
2522}
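
/*
 * For reference: the bytes written above form the x87 "real indefinite" QNaN,
 * i.e. sign=1, exponent=0x7fff, mantissa=0xc000000000000000, which as an 80-bit
 * image reads 0xffff'c0000000'00000000.
 */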
2523
2524
2525/**
2526 * Updates the FOP, FPU.CS and FPUIP registers, extended version.
2527 *
2528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2529 * @param pFpuCtx The FPU context.
2530 * @param uFpuOpcode The FPU opcode value (see IEMCPU::uFpuOpcode).
2531 */
2532DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorkerEx(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint16_t uFpuOpcode) RT_NOEXCEPT
2533{
2534 Assert(uFpuOpcode != UINT16_MAX);
2535 pFpuCtx->FOP = uFpuOpcode;
2536 /** @todo x87.CS and FPUIP need to be kept separately. */
2537 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2538 {
2539 /** @todo Testcase: we make assumptions here about how FPUIP and FPUDP are
2540 * handled in real mode, based on the fnsave and fnstenv images. */
2541 pFpuCtx->CS = 0;
2542 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
2543 }
2544 else if (!IEM_IS_LONG_MODE(pVCpu))
2545 {
2546 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
2547 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
2548 }
2549 else
2550 *(uint64_t *)&pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
2551}
2552
2553
2554#ifndef IEM_WITH_OPAQUE_DECODER_STATE
2555/**
2556 * Updates the FOP, FPU.CS and FPUIP registers.
2557 *
2558 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2559 * @param pFpuCtx The FPU context.
2560 */
2561DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
2562{
2563 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
2564 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, pVCpu->iem.s.uFpuOpcode);
2565}
2566#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */
2567
2568
2569/**
2570 * Marks the specified stack register as free (for FFREE).
2571 *
2572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2573 * @param iStReg The register to free.
2574 */
2575DECLINLINE(void) iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
2576{
2577 Assert(iStReg < 8);
2578 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2579 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
2580 pFpuCtx->FTW &= ~RT_BIT(iReg);
2581}
2582
2583
2584/**
2585 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
2586 *
2587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2588 */
2589DECLINLINE(void) iemFpuStackIncTop(PVMCPUCC pVCpu) RT_NOEXCEPT
2590{
2591 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2592 uint16_t uFsw = pFpuCtx->FSW;
2593 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
2594 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
2595 uFsw &= ~X86_FSW_TOP_MASK;
2596 uFsw |= uTop;
2597 pFpuCtx->FSW = uFsw;
2598}
2599
2600
2601/**
2602 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
2603 *
2604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2605 */
2606DECLINLINE(void) iemFpuStackDecTop(PVMCPUCC pVCpu) RT_NOEXCEPT
2607{
2608 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2609 uint16_t uFsw = pFpuCtx->FSW;
2610 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
2611 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
2612 uFsw &= ~X86_FSW_TOP_MASK;
2613 uFsw |= uTop;
2614 pFpuCtx->FSW = uFsw;
2615}
2616
2617
2618
2619
2620DECLINLINE(int) iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
2621{
2622 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2623 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
2624 if (pFpuCtx->FTW & RT_BIT(iReg))
2625 return VINF_SUCCESS;
2626 return VERR_NOT_FOUND;
2627}
2628
2629
2630DECLINLINE(int) iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef) RT_NOEXCEPT
2631{
2632 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2633 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
2634 if (pFpuCtx->FTW & RT_BIT(iReg))
2635 {
2636 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
2637 return VINF_SUCCESS;
2638 }
2639 return VERR_NOT_FOUND;
2640}
2641
2642
2643DECLINLINE(int) iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
2644 uint8_t iStReg1, PCRTFLOAT80U *ppRef1) RT_NOEXCEPT
2645{
2646 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2647 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2648 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
2649 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
2650 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
2651 {
2652 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
2653 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
2654 return VINF_SUCCESS;
2655 }
2656 return VERR_NOT_FOUND;
2657}
2658
2659
2660DECLINLINE(int) iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1) RT_NOEXCEPT
2661{
2662 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2663 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2664 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
2665 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
2666 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
2667 {
2668 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
2669 return VINF_SUCCESS;
2670 }
2671 return VERR_NOT_FOUND;
2672}
2673
2674
2675/**
2676 * Rotates the stack registers when setting new TOS.
2677 *
2678 * @param pFpuCtx The FPU context.
2679 * @param iNewTop New TOS value.
2680 * @remarks We only do this to speed up fxsave/fxrstor which
2681 * arrange the FP registers in stack order.
2682 * MUST be done before writing the new TOS (FSW).
2683 */
2684DECLINLINE(void) iemFpuRotateStackSetTop(PX86FXSTATE pFpuCtx, uint16_t iNewTop) RT_NOEXCEPT
2685{
2686 uint16_t iOldTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2687 RTFLOAT80U ar80Temp[8];
2688
2689 if (iOldTop == iNewTop)
2690 return;
2691
2692 /* Unscrew the stack and get it into 'native' order. */
2693 ar80Temp[0] = pFpuCtx->aRegs[(8 - iOldTop + 0) & X86_FSW_TOP_SMASK].r80;
2694 ar80Temp[1] = pFpuCtx->aRegs[(8 - iOldTop + 1) & X86_FSW_TOP_SMASK].r80;
2695 ar80Temp[2] = pFpuCtx->aRegs[(8 - iOldTop + 2) & X86_FSW_TOP_SMASK].r80;
2696 ar80Temp[3] = pFpuCtx->aRegs[(8 - iOldTop + 3) & X86_FSW_TOP_SMASK].r80;
2697 ar80Temp[4] = pFpuCtx->aRegs[(8 - iOldTop + 4) & X86_FSW_TOP_SMASK].r80;
2698 ar80Temp[5] = pFpuCtx->aRegs[(8 - iOldTop + 5) & X86_FSW_TOP_SMASK].r80;
2699 ar80Temp[6] = pFpuCtx->aRegs[(8 - iOldTop + 6) & X86_FSW_TOP_SMASK].r80;
2700 ar80Temp[7] = pFpuCtx->aRegs[(8 - iOldTop + 7) & X86_FSW_TOP_SMASK].r80;
2701
2702 /* Now rotate the stack to the new position. */
2703 pFpuCtx->aRegs[0].r80 = ar80Temp[(iNewTop + 0) & X86_FSW_TOP_SMASK];
2704 pFpuCtx->aRegs[1].r80 = ar80Temp[(iNewTop + 1) & X86_FSW_TOP_SMASK];
2705 pFpuCtx->aRegs[2].r80 = ar80Temp[(iNewTop + 2) & X86_FSW_TOP_SMASK];
2706 pFpuCtx->aRegs[3].r80 = ar80Temp[(iNewTop + 3) & X86_FSW_TOP_SMASK];
2707 pFpuCtx->aRegs[4].r80 = ar80Temp[(iNewTop + 4) & X86_FSW_TOP_SMASK];
2708 pFpuCtx->aRegs[5].r80 = ar80Temp[(iNewTop + 5) & X86_FSW_TOP_SMASK];
2709 pFpuCtx->aRegs[6].r80 = ar80Temp[(iNewTop + 6) & X86_FSW_TOP_SMASK];
2710 pFpuCtx->aRegs[7].r80 = ar80Temp[(iNewTop + 7) & X86_FSW_TOP_SMASK];
2711}
2712
2713
2714/**
2715 * Updates the FPU exception status after FCW is changed.
2716 *
2717 * @param pFpuCtx The FPU context.
2718 */
2719DECLINLINE(void) iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
2720{
2721 uint16_t u16Fsw = pFpuCtx->FSW;
2722 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
2723 u16Fsw |= X86_FSW_ES | X86_FSW_B;
2724 else
2725 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
2726 pFpuCtx->FSW = u16Fsw;
2727}
2728
2729
2730/**
2731 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
2732 *
2733 * @returns The full FTW.
2734 * @param pFpuCtx The FPU context.
2735 */
2736DECLINLINE(uint16_t) iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx) RT_NOEXCEPT
2737{
2738 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
2739 uint16_t u16Ftw = 0;
2740 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2741 for (unsigned iSt = 0; iSt < 8; iSt++)
2742 {
2743 unsigned const iReg = (iSt + iTop) & 7;
2744 if (!(u8Ftw & RT_BIT(iReg)))
2745 u16Ftw |= 3 << (iReg * 2); /* empty */
2746 else
2747 {
2748 uint16_t uTag;
2749 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
2750 if (pr80Reg->s.uExponent == 0x7fff)
2751 uTag = 2; /* Exponent is all 1's => Special. */
2752 else if (pr80Reg->s.uExponent == 0x0000)
2753 {
2754 if (pr80Reg->s.uMantissa == 0x0000)
2755 uTag = 1; /* All bits are zero => Zero. */
2756 else
2757 uTag = 2; /* Must be special. */
2758 }
2759 else if (pr80Reg->s.uMantissa & RT_BIT_64(63)) /* The J bit. */
2760 uTag = 0; /* Valid. */
2761 else
2762 uTag = 2; /* Must be special. */
2763
2764 u16Ftw |= uTag << (iReg * 2);
2765 }
2766 }
2767
2768 return u16Ftw;
2769}
2770
2771
2772/**
2773 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
2774 *
2775 * @returns The compressed FTW.
2776 * @param u16FullFtw The full FTW to convert.
2777 */
2778DECLINLINE(uint16_t) iemFpuCompressFtw(uint16_t u16FullFtw) RT_NOEXCEPT
2779{
2780 uint8_t u8Ftw = 0;
2781 for (unsigned i = 0; i < 8; i++)
2782 {
2783 if ((u16FullFtw & 3) != 3 /*empty*/)
2784 u8Ftw |= RT_BIT(i);
2785 u16FullFtw >>= 2;
2786 }
2787
2788 return u8Ftw;
2789}
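
/*
 * Example (illustrative only): the full FTW carries a 2-bit tag per register
 * (00=valid, 01=zero, 10=special, 11=empty), while the compressed form keeps a
 * single bit per register (1 = not empty).  E.g. a full FTW of 0xfffe
 * (register 0 tagged special, the rest empty) compresses to 0x01.
 */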
2790
2791/** @} */
2792
2793
2794/** @name Memory access.
2795 *
2796 * @{
2797 */
2798
2799
2800/**
2801 * Checks whether alignment checks are enabled or not.
2802 *
2803 * @returns true if enabled, false if not.
2804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2805 */
2806DECLINLINE(bool) iemMemAreAlignmentChecksEnabled(PVMCPUCC pVCpu) RT_NOEXCEPT
2807{
2808 AssertCompile(X86_CR0_AM == X86_EFL_AC);
2809 return IEM_GET_CPL(pVCpu) == 3
2810 && (((uint32_t)pVCpu->cpum.GstCtx.cr0 & pVCpu->cpum.GstCtx.eflags.u) & X86_CR0_AM);
2811}
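
/*
 * Note: the AssertCompile above relies on X86_CR0_AM and X86_EFL_AC both being
 * bit 18, so a single AND of CR0 and EFLAGS tests that both the global enable
 * (CR0.AM) and the per-thread enable (EFLAGS.AC) are set; alignment checks only
 * apply at CPL 3.
 */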
2812
2813/**
2814 * Checks if the given segment can be written to, raising the appropriate
2815 * exception if not.
2816 *
2817 * @returns VBox strict status code.
2818 *
2819 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2820 * @param pHid Pointer to the hidden register.
2821 * @param iSegReg The register number.
2822 * @param pu64BaseAddr Where to return the base address to use for the
2823 * segment. (In 64-bit code it may differ from the
2824 * base in the hidden segment.)
2825 */
2826DECLINLINE(VBOXSTRICTRC) iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
2827 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
2828{
2829 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2830
2831 if (IEM_IS_64BIT_CODE(pVCpu))
2832 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
2833 else
2834 {
2835 if (!pHid->Attr.n.u1Present)
2836 {
2837 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
2838 AssertRelease(uSel == 0);
2839 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
2840 return iemRaiseGeneralProtectionFault0(pVCpu);
2841 }
2842
2843 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
2844 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
2845 && !IEM_IS_64BIT_CODE(pVCpu) )
2846 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
2847 *pu64BaseAddr = pHid->u64Base;
2848 }
2849 return VINF_SUCCESS;
2850}
2851
2852
2853/**
2854 * Checks if the given segment can be read from, raising the appropriate
2855 * exception if not.
2856 *
2857 * @returns VBox strict status code.
2858 *
2859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2860 * @param pHid Pointer to the hidden register.
2861 * @param iSegReg The register number.
2862 * @param pu64BaseAddr Where to return the base address to use for the
2863 * segment. (In 64-bit code it may differ from the
2864 * base in the hidden segment.)
2865 */
2866DECLINLINE(VBOXSTRICTRC) iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
2867 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
2868{
2869 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2870
2871 if (IEM_IS_64BIT_CODE(pVCpu))
2872 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
2873 else
2874 {
2875 if (!pHid->Attr.n.u1Present)
2876 {
2877 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
2878 AssertRelease(uSel == 0);
2879 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
2880 return iemRaiseGeneralProtectionFault0(pVCpu);
2881 }
2882
2883 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2884 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
2885 *pu64BaseAddr = pHid->u64Base;
2886 }
2887 return VINF_SUCCESS;
2888}
2889
2890
2891/**
2892 * Maps a physical page.
2893 *
2894 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
2895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2896 * @param GCPhysMem The physical address.
2897 * @param fAccess The intended access.
2898 * @param ppvMem Where to return the mapping address.
2899 * @param pLock The PGM lock.
2900 */
2901DECLINLINE(int) iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
2902 void **ppvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
2903{
2904#ifdef IEM_LOG_MEMORY_WRITES
2905 if (fAccess & IEM_ACCESS_TYPE_WRITE)
2906 return VERR_PGM_PHYS_TLB_CATCH_ALL;
2907#endif
2908
2909 /** @todo This API may require some improving later. A private deal with PGM
2910 * regarding locking and unlocking needs to be struck. A couple of TLBs
2911 * living in PGM, but with publicly accessible inlined access methods
2912 * could perhaps be an even better solution. */
2913 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
2914 GCPhysMem,
2915 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
2916 RT_BOOL(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS),
2917 ppvMem,
2918 pLock);
2919 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
2920 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
2921
2922 return rc;
2923}
2924
2925
2926/**
2927 * Unmap a page previously mapped by iemMemPageMap.
2928 *
2929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2930 * @param GCPhysMem The physical address.
2931 * @param fAccess The intended access.
2932 * @param pvMem What iemMemPageMap returned.
2933 * @param pLock The PGM lock.
2934 */
2935DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
2936 const void *pvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
2937{
2938 NOREF(pVCpu);
2939 NOREF(GCPhysMem);
2940 NOREF(fAccess);
2941 NOREF(pvMem);
2942 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
2943}
2944
2945#ifdef IEM_WITH_SETJMP
2946
2947/** @todo slim this down */
2948DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg,
2949 size_t cbMem, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
2950{
2951 Assert(cbMem >= 1);
2952 Assert(iSegReg < X86_SREG_COUNT);
2953
2954 /*
2955 * 64-bit mode is simpler.
2956 */
2957 if (IEM_IS_64BIT_CODE(pVCpu))
2958 {
2959 if (iSegReg >= X86_SREG_FS && iSegReg != UINT8_MAX)
2960 {
2961 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2962 PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
2963 GCPtrMem += pSel->u64Base;
2964 }
2965
2966 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
2967 return GCPtrMem;
2968 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
2969 }
2970 /*
2971 * 16-bit and 32-bit segmentation.
2972 */
2973 else if (iSegReg != UINT8_MAX)
2974 {
2975 /** @todo Does this apply to segments with 4G-1 limit? */
2976 uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
2977 if (RT_LIKELY(GCPtrLast32 >= (uint32_t)GCPtrMem))
2978 {
2979 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2980 PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
2981 switch (pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
2982 | X86_SEL_TYPE_READ | X86_SEL_TYPE_WRITE /* same as read */
2983 | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_CONF /* same as down */
2984 | X86_SEL_TYPE_CODE))
2985 {
2986 case X86DESCATTR_P: /* readonly data, expand up */
2987 case X86DESCATTR_P | X86_SEL_TYPE_WRITE: /* writable data, expand up */
2988 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ: /* code, read-only */
2989 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_CONF: /* conforming code, read-only */
2990 /* expand up */
2991 if (RT_LIKELY(GCPtrLast32 <= pSel->u32Limit))
2992 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
2993 Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x vs %#x\n",
2994 (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit));
2995 break;
2996
2997 case X86DESCATTR_P | X86_SEL_TYPE_DOWN: /* readonly data, expand down */
2998 case X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_WRITE: /* writable data, expand down */
2999 /* expand down */
3000 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
3001 && ( pSel->Attr.n.u1DefBig
3002 || GCPtrLast32 <= UINT32_C(0xffff)) ))
3003 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
3004 Log10(("iemMemApplySegmentToReadJmp: expand down out of bounds %#x..%#x vs %#x..%#x\n",
3005 (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit, pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT16_MAX));
3006 break;
3007
3008 default:
3009 Log10(("iemMemApplySegmentToReadJmp: bad selector %#x\n", pSel->Attr.u));
3010 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
3011 break;
3012 }
3013 }
3014 Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x\n",(uint32_t)GCPtrMem, GCPtrLast32));
3015 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
3016 }
3017 /*
3018 * 32-bit flat address.
3019 */
3020 else
3021 return GCPtrMem;
3022}
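
/*
 * Illustrative example for the expand-down case (hypothetical values): a 16-bit
 * expand-down data segment with u32Limit=0x0fff accepts offsets 0x1000..0xffff,
 * so a 2 byte read at GCPtrMem=0x1000 passes (0x1000 > 0x0fff and the last byte
 * 0x1001 is <= 0xffff), while a read at 0x0ffe falls out of the checks above and
 * raises a selector bounds fault.
 */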
3023
3024
3025/** @todo slim this down */
3026DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem,
3027 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
3028{
3029 Assert(cbMem >= 1);
3030 Assert(iSegReg < X86_SREG_COUNT);
3031
3032 /*
3033 * 64-bit mode is simpler.
3034 */
3035 if (IEM_IS_64BIT_CODE(pVCpu))
3036 {
3037 if (iSegReg >= X86_SREG_FS)
3038 {
3039 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3040 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
3041 GCPtrMem += pSel->u64Base;
3042 }
3043
3044 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
3045 return GCPtrMem;
3046 }
3047 /*
3048 * 16-bit and 32-bit segmentation.
3049 */
3050 else
3051 {
3052 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3053 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
3054 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
3055 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
3056 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
3057 {
3058 /* expand up */
3059 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
3060 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
3061 && GCPtrLast32 > (uint32_t)GCPtrMem))
3062 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
3063 }
3064 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
3065 {
3066 /* expand down */
3067 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
3068 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
3069 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3070 && GCPtrLast32 > (uint32_t)GCPtrMem))
3071 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
3072 }
3073 else
3074 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
3075 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
3076 }
3077 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
3078}
3079
3080#endif /* IEM_WITH_SETJMP */
3081
3082/**
3083 * Fakes a long mode stack selector for SS = 0.
3084 *
3085 * @param pDescSs Where to return the fake stack descriptor.
3086 * @param uDpl The DPL we want.
3087 */
3088DECLINLINE(void) iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl) RT_NOEXCEPT
3089{
3090 pDescSs->Long.au64[0] = 0;
3091 pDescSs->Long.au64[1] = 0;
3092 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
3093 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
3094 pDescSs->Long.Gen.u2Dpl = uDpl;
3095 pDescSs->Long.Gen.u1Present = 1;
3096 pDescSs->Long.Gen.u1Long = 1;
3097}
3098
3099/** @} */
3100
3101
3102#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3103
3104/**
3105 * Gets CR0 fixed-0 bits in VMX operation.
3106 *
3107 * We do this rather than fetching what we report to the guest (in
3108 * IA32_VMX_CR0_FIXED0 MSR) because real hardware (and so do we) reports the same
3109 * values regardless of whether the unrestricted-guest feature is available on the CPU.
3110 *
3111 * @returns CR0 fixed-0 bits.
3112 * @param pVCpu The cross context virtual CPU structure.
3113 * @param fVmxNonRootMode Whether the CR0 fixed-0 bits for VMX non-root mode
3114 * must be returned. When @c false, the CR0 fixed-0
3115 * bits for VMX root mode is returned.
3116 *
3117 */
3118DECLINLINE(uint64_t) iemVmxGetCr0Fixed0(PCVMCPUCC pVCpu, bool fVmxNonRootMode) RT_NOEXCEPT
3119{
3120 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
3121
3122 PCVMXMSRS pMsrs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs;
3123 if ( fVmxNonRootMode
3124 && (pMsrs->ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST))
3125 return VMX_V_CR0_FIXED0_UX;
3126 return VMX_V_CR0_FIXED0;
3127}
3128
3129
3130/**
3131 * Sets virtual-APIC write emulation as pending.
3132 *
3133 * @param pVCpu The cross context virtual CPU structure.
3134 * @param offApic The offset in the virtual-APIC page that was written.
3135 */
3136DECLINLINE(void) iemVmxVirtApicSetPendingWrite(PVMCPUCC pVCpu, uint16_t offApic) RT_NOEXCEPT
3137{
3138 Assert(offApic < XAPIC_OFF_END + 4);
3139
3140 /*
3141 * Record the currently updated APIC offset, as we need this later for figuring
3142 * out whether to perform TPR, EOI or self-IPI virtualization, as well as for
3143 * supplying the exit qualification when causing an APIC-write VM-exit.
3144 */
3145 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offApic;
3146
3147 /*
3148 * Flag that we need to perform virtual-APIC write emulation (TPR/PPR/EOI/Self-IPI
3149 * virtualization or APIC-write emulation).
3150 */
3151 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3152 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
3153}
3154
3155#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3156
3157#endif /* !VMM_INCLUDED_SRC_include_IEMInline_h */