VirtualBox
source: vbox/trunk/src/VBox/VMM/include/IEMInline.h @ 98103
Last change: r98103, checked in by vboxsync on 2023-01-17 (copyright year updates by scm).

1/* $Id: IEMInline.h 98103 2023-01-17 14:15:46Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Inlined Functions.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMInline_h
29#define VMM_INCLUDED_SRC_include_IEMInline_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34
35
36/**
37 * Makes status code adjustments (pass up from I/O and access handlers)
38 * as well as maintaining statistics.
39 *
40 * @returns Strict VBox status code to pass up.
41 * @param pVCpu The cross context virtual CPU structure of the calling thread.
42 * @param rcStrict The status from executing an instruction.
43 */
44DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
45{
46 if (rcStrict != VINF_SUCCESS)
47 {
48 if (RT_SUCCESS(rcStrict))
49 {
50 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
51 || rcStrict == VINF_IOM_R3_IOPORT_READ
52 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
53 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
54 || rcStrict == VINF_IOM_R3_MMIO_READ
55 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
56 || rcStrict == VINF_IOM_R3_MMIO_WRITE
57 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
58 || rcStrict == VINF_CPUM_R3_MSR_READ
59 || rcStrict == VINF_CPUM_R3_MSR_WRITE
60 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
61 || rcStrict == VINF_EM_RAW_TO_R3
62 || rcStrict == VINF_EM_TRIPLE_FAULT
63 || rcStrict == VINF_GIM_R3_HYPERCALL
64 /* raw-mode / virt handlers only: */
65 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
66 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
67 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
68 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
69 || rcStrict == VINF_SELM_SYNC_GDT
70 || rcStrict == VINF_CSAM_PENDING_ACTION
71 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
72 /* nested hw.virt codes: */
73 || rcStrict == VINF_VMX_VMEXIT
74 || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
75 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
76 || rcStrict == VINF_SVM_VMEXIT
77 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
78/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
79 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
80#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
81 if ( rcStrict == VINF_VMX_VMEXIT
82 && rcPassUp == VINF_SUCCESS)
83 rcStrict = VINF_SUCCESS;
84 else
85#endif
86#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
87 if ( rcStrict == VINF_SVM_VMEXIT
88 && rcPassUp == VINF_SUCCESS)
89 rcStrict = VINF_SUCCESS;
90 else
91#endif
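 /* Lower VINF_EM_* values have higher scheduling priority, so the pending
 pass-up status only replaces rcStrict when it lies outside the VINF_EM
 range (i.e. is a more specific code) or has a lower, and thus more
 urgent, value than rcStrict. */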
92 if (rcPassUp == VINF_SUCCESS)
93 pVCpu->iem.s.cRetInfStatuses++;
94 else if ( rcPassUp < VINF_EM_FIRST
95 || rcPassUp > VINF_EM_LAST
96 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
97 {
98 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
99 pVCpu->iem.s.cRetPassUpStatus++;
100 rcStrict = rcPassUp;
101 }
102 else
103 {
104 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
105 pVCpu->iem.s.cRetInfStatuses++;
106 }
107 }
108 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
109 pVCpu->iem.s.cRetAspectNotImplemented++;
110 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
111 pVCpu->iem.s.cRetInstrNotImplemented++;
112 else
113 pVCpu->iem.s.cRetErrStatuses++;
114 }
115 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
116 {
117 pVCpu->iem.s.cRetPassUpStatus++;
118 rcStrict = pVCpu->iem.s.rcPassUp;
119 }
120
121 return rcStrict;
122}
123
124
125/**
126 * Sets the pass up status.
127 *
128 * @returns VINF_SUCCESS.
129 * @param pVCpu The cross context virtual CPU structure of the
130 * calling thread.
131 * @param rcPassUp The pass up status. Must be informational.
132 * VINF_SUCCESS is not allowed.
133 */
134DECLINLINE(int) iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp) RT_NOEXCEPT
135{
136 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
137
138 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
139 if (rcOldPassUp == VINF_SUCCESS)
140 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
141 /* If both are EM scheduling codes, use EM priority rules. */
142 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
143 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
144 {
145 if (rcPassUp < rcOldPassUp)
146 {
147 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
148 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
149 }
150 else
151 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
152 }
153 /* Override EM scheduling with specific status code. */
154 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
155 {
156 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
157 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
158 }
159 /* Don't override specific status code, first come first served. */
160 else
161 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
162 return VINF_SUCCESS;
163}
164
165
166/**
167 * Calculates the CPU mode.
168 *
169 * This is mainly for updating IEMCPU::enmCpuMode.
170 *
171 * @returns CPU mode.
172 * @param pVCpu The cross context virtual CPU structure of the
173 * calling thread.
174 */
175DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPUCC pVCpu) RT_NOEXCEPT
176{
177 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
178 return IEMMODE_64BIT;
179 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
180 return IEMMODE_32BIT;
181 return IEMMODE_16BIT;
182}
183
184
185#if defined(VBOX_INCLUDED_vmm_dbgf_h) || defined(DOXYGEN_RUNNING) /* dbgf.ro.cEnabledHwBreakpoints */
186/**
187 * Initializes the execution state.
188 *
189 * @param pVCpu The cross context virtual CPU structure of the
190 * calling thread.
191 * @param fBypassHandlers Whether to bypass access handlers.
192 *
193 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
194 * side-effects in strict builds.
195 */
196DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, bool fBypassHandlers) RT_NOEXCEPT
197{
198 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
199 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
200 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
201 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
202 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
205 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
206 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
207 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
208
209 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
210 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
211# ifdef VBOX_STRICT
212 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
213 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
214 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
215 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
216 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
217 pVCpu->iem.s.uRexReg = 127;
218 pVCpu->iem.s.uRexB = 127;
219 pVCpu->iem.s.offModRm = 127;
220 pVCpu->iem.s.uRexIndex = 127;
221 pVCpu->iem.s.iEffSeg = 127;
222 pVCpu->iem.s.idxPrefix = 127;
223 pVCpu->iem.s.uVex3rdReg = 127;
224 pVCpu->iem.s.uVexLength = 127;
225 pVCpu->iem.s.fEvexStuff = 127;
226 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
227# ifdef IEM_WITH_CODE_TLB
228 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
229 pVCpu->iem.s.pbInstrBuf = NULL;
230 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
231 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
232 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
233 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
234# else
235 pVCpu->iem.s.offOpcode = 127;
236 pVCpu->iem.s.cbOpcode = 127;
237# endif
238# endif /* VBOX_STRICT */
239
240 pVCpu->iem.s.cActiveMappings = 0;
241 pVCpu->iem.s.iNextMapping = 0;
242 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
243 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
244 pVCpu->iem.s.fDisregardLock = false;
245 pVCpu->iem.s.fPendingInstructionBreakpoints = false;
246 pVCpu->iem.s.fPendingDataBreakpoints = false;
247 pVCpu->iem.s.fPendingIoBreakpoints = false;
248 if (RT_LIKELY( !(pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)
249 && pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledHwBreakpoints == 0))
250 { /* likely */ }
251 else
252 iemInitPendingBreakpointsSlow(pVCpu);
253}
254#endif /* VBOX_INCLUDED_vmm_dbgf_h */
255
256
257#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
258/**
259 * Performs a minimal reinitialization of the execution state.
260 *
261 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
262 * 'world-switch' types operations on the CPU. Currently only nested
263 * hardware-virtualization uses it.
264 *
265 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
266 */
267DECLINLINE(void) iemReInitExec(PVMCPUCC pVCpu) RT_NOEXCEPT
268{
269 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
270 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
271
272 pVCpu->iem.s.uCpl = uCpl;
273 pVCpu->iem.s.enmCpuMode = enmMode;
274 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
275 pVCpu->iem.s.enmEffAddrMode = enmMode;
276 if (enmMode != IEMMODE_64BIT)
277 {
278 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
279 pVCpu->iem.s.enmEffOpSize = enmMode;
280 }
281 else
282 {
283 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
284 pVCpu->iem.s.enmEffOpSize = enmMode;
285 }
286 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
287# ifndef IEM_WITH_CODE_TLB
288 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
289 pVCpu->iem.s.offOpcode = 0;
290 pVCpu->iem.s.cbOpcode = 0;
291# endif
292 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
293}
294#endif
295
296
297/**
298 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
299 *
300 * @param pVCpu The cross context virtual CPU structure of the
301 * calling thread.
302 */
303DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu) RT_NOEXCEPT
304{
305 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
306#ifdef VBOX_STRICT
307# ifdef IEM_WITH_CODE_TLB
308 NOREF(pVCpu);
309# else
310 pVCpu->iem.s.cbOpcode = 0;
311# endif
312#else
313 NOREF(pVCpu);
314#endif
315}
316
317
318/**
319 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
320 *
321 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
322 *
323 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
324 * @param pVCpu The cross context virtual CPU structure of the calling thread.
325 * @param rcStrict The status code to fiddle.
326 */
327DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
328{
329 iemUninitExec(pVCpu);
330 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
331}
332
333
334/**
335 * Macro used by the IEMExec* method to check the given instruction length.
336 *
337 * Will return on failure!
338 *
339 * @param a_cbInstr The given instruction length.
340 * @param a_cbMin The minimum length.
341 */
342#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
343 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
344 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
345
346
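/* Two flavours of each opcode fetcher follow: without IEM_WITH_SETJMP they
   report failure through a strict status code return, with it they report
   failure via longjmp (IEM_DO_LONGJMP) so the fast path can return the fetched
   value directly. */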
347#ifndef IEM_WITH_SETJMP
348
349/**
350 * Fetches the first opcode byte.
351 *
352 * @returns Strict VBox status code.
353 * @param pVCpu The cross context virtual CPU structure of the
354 * calling thread.
355 * @param pu8 Where to return the opcode byte.
356 */
357DECLINLINE(VBOXSTRICTRC) iemOpcodeGetFirstU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
358{
359 /*
360 * Check for hardware instruction breakpoints.
361 */
362 if (RT_LIKELY(!pVCpu->iem.s.fPendingInstructionBreakpoints))
363 { /* likely */ }
364 else
365 {
366 VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
367 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
368 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
369 { /* likely */ }
370 else if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
371 return iemRaiseDebugException(pVCpu);
372 else
373 return rcStrict;
374 }
375
376 /*
377 * Fetch the first opcode byte.
378 */
379 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
380 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
381 {
382 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
383 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
384 return VINF_SUCCESS;
385 }
386 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
387}
388
389#else /* IEM_WITH_SETJMP */
390
391/**
392 * Fetches the first opcode byte, longjmp on error.
393 *
394 * @returns The opcode byte.
395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
396 */
397DECL_INLINE_THROW(uint8_t) iemOpcodeGetFirstU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
398{
399 /*
400 * Check for hardware instruction breakpoints.
401 */
402 if (RT_LIKELY(!pVCpu->iem.s.fPendingInstructionBreakpoints))
403 { /* likely */ }
404 else
405 {
406 VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
407 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
408 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
409 { /* likely */ }
410 else
411 {
412 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
413 rcStrict = iemRaiseDebugException(pVCpu);
414 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
415 }
416 }
417
418 /*
419 * Fetch the first opcode byte.
420 */
421# ifdef IEM_WITH_CODE_TLB
422 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
423 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
424 if (RT_LIKELY( pbBuf != NULL
425 && offBuf < pVCpu->iem.s.cbInstrBuf))
426 {
427 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
428 return pbBuf[offBuf];
429 }
430# else
431 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
432 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
433 {
434 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
435 return pVCpu->iem.s.abOpcode[offOpcode];
436 }
437# endif
438 return iemOpcodeGetNextU8SlowJmp(pVCpu);
439}
440
441#endif /* IEM_WITH_SETJMP */
442
443/**
444 * Fetches the first opcode byte, returns/throws automatically on failure.
445 *
446 * @param a_pu8 Where to return the opcode byte.
447 * @remark Implicitly references pVCpu.
448 */
449#ifndef IEM_WITH_SETJMP
450# define IEM_OPCODE_GET_FIRST_U8(a_pu8) \
451 do \
452 { \
453 VBOXSTRICTRC rcStrict2 = iemOpcodeGetFirstU8(pVCpu, (a_pu8)); \
454 if (rcStrict2 == VINF_SUCCESS) \
455 { /* likely */ } \
456 else \
457 return rcStrict2; \
458 } while (0)
459#else
460# define IEM_OPCODE_GET_FIRST_U8(a_pu8) (*(a_pu8) = iemOpcodeGetFirstU8Jmp(pVCpu))
461#endif /* IEM_WITH_SETJMP */
462
463
464#ifndef IEM_WITH_SETJMP
465
466/**
467 * Fetches the next opcode byte.
468 *
469 * @returns Strict VBox status code.
470 * @param pVCpu The cross context virtual CPU structure of the
471 * calling thread.
472 * @param pu8 Where to return the opcode byte.
473 */
474DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
475{
476 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
477 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
478 {
479 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
480 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
481 return VINF_SUCCESS;
482 }
483 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
484}
485
486#else /* IEM_WITH_SETJMP */
487
488/**
489 * Fetches the next opcode byte, longjmp on error.
490 *
491 * @returns The opcode byte.
492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
493 */
494DECL_INLINE_THROW(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
495{
496# ifdef IEM_WITH_CODE_TLB
497 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
498 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
499 if (RT_LIKELY( pbBuf != NULL
500 && offBuf < pVCpu->iem.s.cbInstrBuf))
501 {
502 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
503 return pbBuf[offBuf];
504 }
505# else
506 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
507 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
508 {
509 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
510 return pVCpu->iem.s.abOpcode[offOpcode];
511 }
512# endif
513 return iemOpcodeGetNextU8SlowJmp(pVCpu);
514}
515
516#endif /* IEM_WITH_SETJMP */
517
518/**
519 * Fetches the next opcode byte, returns automatically on failure.
520 *
521 * @param a_pu8 Where to return the opcode byte.
522 * @remark Implicitly references pVCpu.
523 */
524#ifndef IEM_WITH_SETJMP
525# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
526 do \
527 { \
528 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
529 if (rcStrict2 == VINF_SUCCESS) \
530 { /* likely */ } \
531 else \
532 return rcStrict2; \
533 } while (0)
534#else
535# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
536#endif /* IEM_WITH_SETJMP */
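/* Illustrative sketch of how a decoder routine consumes these macros
 * (hypothetical call site, not taken from this file; assumes a pVCpu variable
 * is in scope, as the macro docs note):
 *
 *     uint8_t bOpcode;
 *     IEM_OPCODE_GET_NEXT_U8(&bOpcode);   // returns or longjmps on fetch failure
 *     // ... dispatch on bOpcode ...
 */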
537
538
539#ifndef IEM_WITH_SETJMP
540/**
541 * Fetches the next signed byte from the opcode stream.
542 *
543 * @returns Strict VBox status code.
544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
545 * @param pi8 Where to return the signed byte.
546 */
547DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8) RT_NOEXCEPT
548{
549 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
550}
551#endif /* !IEM_WITH_SETJMP */
552
553
554/**
555 * Fetches the next signed byte from the opcode stream, returning automatically
556 * on failure.
557 *
558 * @param a_pi8 Where to return the signed byte.
559 * @remark Implicitly references pVCpu.
560 */
561#ifndef IEM_WITH_SETJMP
562# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
563 do \
564 { \
565 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
566 if (rcStrict2 != VINF_SUCCESS) \
567 return rcStrict2; \
568 } while (0)
569#else /* IEM_WITH_SETJMP */
570# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
571
572#endif /* IEM_WITH_SETJMP */
573
574
575#ifndef IEM_WITH_SETJMP
576/**
577 * Fetches the next signed byte from the opcode stream, extending it to
578 * unsigned 16-bit.
579 *
580 * @returns Strict VBox status code.
581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
582 * @param pu16 Where to return the unsigned word.
583 */
584DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
585{
586 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
587 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
588 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
589
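 /* The int8_t cast sign-extends the opcode byte; assigning it to the wider
 unsigned destination then yields the 16-bit two's complement value. */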
590 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
591 pVCpu->iem.s.offOpcode = offOpcode + 1;
592 return VINF_SUCCESS;
593}
594#endif /* !IEM_WITH_SETJMP */
595
596/**
597 * Fetches the next signed byte from the opcode stream, sign-extends it to a
598 * word, and returns automatically on failure.
599 *
600 * @param a_pu16 Where to return the word.
601 * @remark Implicitly references pVCpu.
602 */
603#ifndef IEM_WITH_SETJMP
604# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
605 do \
606 { \
607 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
608 if (rcStrict2 != VINF_SUCCESS) \
609 return rcStrict2; \
610 } while (0)
611#else
612# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
613#endif
614
615#ifndef IEM_WITH_SETJMP
616/**
617 * Fetches the next signed byte from the opcode stream, extending it to
618 * unsigned 32-bit.
619 *
620 * @returns Strict VBox status code.
621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
622 * @param pu32 Where to return the unsigned dword.
623 */
624DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
625{
626 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
627 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
628 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
629
630 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
631 pVCpu->iem.s.offOpcode = offOpcode + 1;
632 return VINF_SUCCESS;
633}
634#endif /* !IEM_WITH_SETJMP */
635
636/**
637 * Fetches the next signed byte from the opcode stream, sign-extends it to a
638 * double word, and returns automatically on failure.
639 *
640 * @param a_pu32 Where to return the double word.
641 * @remark Implicitly references pVCpu.
642 */
643#ifndef IEM_WITH_SETJMP
644# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
645 do \
646 { \
647 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
648 if (rcStrict2 != VINF_SUCCESS) \
649 return rcStrict2; \
650 } while (0)
651#else
652# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
653#endif
654
655
656#ifndef IEM_WITH_SETJMP
657/**
658 * Fetches the next signed byte from the opcode stream, extending it to
659 * unsigned 64-bit.
660 *
661 * @returns Strict VBox status code.
662 * @param pVCpu The cross context virtual CPU structure of the calling thread.
663 * @param pu64 Where to return the unsigned qword.
664 */
665DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
666{
667 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
668 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
669 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
670
671 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
672 pVCpu->iem.s.offOpcode = offOpcode + 1;
673 return VINF_SUCCESS;
674}
675#endif /* !IEM_WITH_SETJMP */
676
677/**
678 * Fetches the next signed byte from the opcode stream, sign-extends it to a
679 * quad word, and returns automatically on failure.
680 *
681 * @param a_pu64 Where to return the quad word.
682 * @remark Implicitly references pVCpu.
683 */
684#ifndef IEM_WITH_SETJMP
685# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
686 do \
687 { \
688 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
689 if (rcStrict2 != VINF_SUCCESS) \
690 return rcStrict2; \
691 } while (0)
692#else
693# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
694#endif
695
696
697#ifndef IEM_WITH_SETJMP
698/**
699 * Fetches the next opcode byte.
700 *
701 * @returns Strict VBox status code.
702 * @param pVCpu The cross context virtual CPU structure of the
703 * calling thread.
704 * @param pu8 Where to return the opcode byte.
705 */
706DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
707{
708 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
709 pVCpu->iem.s.offModRm = offOpcode;
710 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
711 {
712 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
713 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
714 return VINF_SUCCESS;
715 }
716 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
717}
718#else /* IEM_WITH_SETJMP */
719/**
720 * Fetches the next opcode byte, longjmp on error.
721 *
722 * @returns The opcode byte.
723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
724 */
725DECL_INLINE_THROW(uint8_t) iemOpcodeGetNextRmJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
726{
727# ifdef IEM_WITH_CODE_TLB
728 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
729 pVCpu->iem.s.offModRm = offBuf;
730 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
731 if (RT_LIKELY( pbBuf != NULL
732 && offBuf < pVCpu->iem.s.cbInstrBuf))
733 {
734 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
735 return pbBuf[offBuf];
736 }
737# else
738 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
739 pVCpu->iem.s.offModRm = offOpcode;
740 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
741 {
742 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
743 return pVCpu->iem.s.abOpcode[offOpcode];
744 }
745# endif
746 return iemOpcodeGetNextU8SlowJmp(pVCpu);
747}
748#endif /* IEM_WITH_SETJMP */
749
750/**
751 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
752 * on failure.
753 *
754 * Will note down the position of the ModR/M byte for VT-x exits.
755 *
756 * @param a_pbRm Where to return the RM opcode byte.
757 * @remark Implicitly references pVCpu.
758 */
759#ifndef IEM_WITH_SETJMP
760# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
761 do \
762 { \
763 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
764 if (rcStrict2 == VINF_SUCCESS) \
765 { /* likely */ } \
766 else \
767 return rcStrict2; \
768 } while (0)
769#else
770# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
771#endif /* IEM_WITH_SETJMP */
772
773
774#ifndef IEM_WITH_SETJMP
775
776/**
777 * Fetches the next opcode word.
778 *
779 * @returns Strict VBox status code.
780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
781 * @param pu16 Where to return the opcode word.
782 */
783DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
784{
785 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
786 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
787 {
788 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
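 /* Both branches assemble the little-endian immediate: the first reads it
 directly on hosts that tolerate misaligned little-endian loads, the
 RT_MAKE_U16 fallback builds it byte by byte. */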
789# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
790 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
791# else
792 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
793# endif
794 return VINF_SUCCESS;
795 }
796 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
797}
798
799#else /* IEM_WITH_SETJMP */
800
801/**
802 * Fetches the next opcode word, longjmp on error.
803 *
804 * @returns The opcode word.
805 * @param pVCpu The cross context virtual CPU structure of the calling thread.
806 */
807DECL_INLINE_THROW(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
808{
809# ifdef IEM_WITH_CODE_TLB
810 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
811 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
812 if (RT_LIKELY( pbBuf != NULL
813 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
814 {
815 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
816# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
817 return *(uint16_t const *)&pbBuf[offBuf];
818# else
819 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
820# endif
821 }
822# else /* !IEM_WITH_CODE_TLB */
823 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
824 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
825 {
826 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
827# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
828 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
829# else
830 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
831# endif
832 }
833# endif /* !IEM_WITH_CODE_TLB */
834 return iemOpcodeGetNextU16SlowJmp(pVCpu);
835}
836
837#endif /* IEM_WITH_SETJMP */
838
839/**
840 * Fetches the next opcode word, returns automatically on failure.
841 *
842 * @param a_pu16 Where to return the opcode word.
843 * @remark Implicitly references pVCpu.
844 */
845#ifndef IEM_WITH_SETJMP
846# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
847 do \
848 { \
849 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
850 if (rcStrict2 != VINF_SUCCESS) \
851 return rcStrict2; \
852 } while (0)
853#else
854# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
855#endif
856
857#ifndef IEM_WITH_SETJMP
858/**
859 * Fetches the next opcode word, zero extending it to a double word.
860 *
861 * @returns Strict VBox status code.
862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
863 * @param pu32 Where to return the opcode double word.
864 */
865DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
866{
867 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
868 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
869 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
870
871 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
872 pVCpu->iem.s.offOpcode = offOpcode + 2;
873 return VINF_SUCCESS;
874}
875#endif /* !IEM_WITH_SETJMP */
876
877/**
878 * Fetches the next opcode word and zero extends it to a double word, returns
879 * automatically on failure.
880 *
881 * @param a_pu32 Where to return the opcode double word.
882 * @remark Implicitly references pVCpu.
883 */
884#ifndef IEM_WITH_SETJMP
885# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
886 do \
887 { \
888 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
889 if (rcStrict2 != VINF_SUCCESS) \
890 return rcStrict2; \
891 } while (0)
892#else
893# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
894#endif
895
896#ifndef IEM_WITH_SETJMP
897/**
898 * Fetches the next opcode word, zero extending it to a quad word.
899 *
900 * @returns Strict VBox status code.
901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
902 * @param pu64 Where to return the opcode quad word.
903 */
904DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
905{
906 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
907 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
908 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
909
910 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
911 pVCpu->iem.s.offOpcode = offOpcode + 2;
912 return VINF_SUCCESS;
913}
914#endif /* !IEM_WITH_SETJMP */
915
916/**
917 * Fetches the next opcode word and zero extends it to a quad word, returns
918 * automatically on failure.
919 *
920 * @param a_pu64 Where to return the opcode quad word.
921 * @remark Implicitly references pVCpu.
922 */
923#ifndef IEM_WITH_SETJMP
924# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
925 do \
926 { \
927 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
928 if (rcStrict2 != VINF_SUCCESS) \
929 return rcStrict2; \
930 } while (0)
931#else
932# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
933#endif
934
935
936#ifndef IEM_WITH_SETJMP
937/**
938 * Fetches the next signed word from the opcode stream.
939 *
940 * @returns Strict VBox status code.
941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
942 * @param pi16 Where to return the signed word.
943 */
944DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16) RT_NOEXCEPT
945{
946 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
947}
948#endif /* !IEM_WITH_SETJMP */
949
950
951/**
952 * Fetches the next signed word from the opcode stream, returning automatically
953 * on failure.
954 *
955 * @param a_pi16 Where to return the signed word.
956 * @remark Implicitly references pVCpu.
957 */
958#ifndef IEM_WITH_SETJMP
959# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
960 do \
961 { \
962 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
963 if (rcStrict2 != VINF_SUCCESS) \
964 return rcStrict2; \
965 } while (0)
966#else
967# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
968#endif
969
970#ifndef IEM_WITH_SETJMP
971
972/**
973 * Fetches the next opcode dword.
974 *
975 * @returns Strict VBox status code.
976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
977 * @param pu32 Where to return the opcode double word.
978 */
979DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
980{
981 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
982 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
983 {
984 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
985# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
986 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
987# else
988 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
989 pVCpu->iem.s.abOpcode[offOpcode + 1],
990 pVCpu->iem.s.abOpcode[offOpcode + 2],
991 pVCpu->iem.s.abOpcode[offOpcode + 3]);
992# endif
993 return VINF_SUCCESS;
994 }
995 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
996}
997
998#else /* IEM_WITH_SETJMP */
999
1000/**
1001 * Fetches the next opcode dword, longjmp on error.
1002 *
1003 * @returns The opcode dword.
1004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1005 */
1006DECL_INLINE_THROW(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1007{
1008# ifdef IEM_WITH_CODE_TLB
1009 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1010 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1011 if (RT_LIKELY( pbBuf != NULL
1012 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
1013 {
1014 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
1015# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1016 return *(uint32_t const *)&pbBuf[offBuf];
1017# else
1018 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
1019 pbBuf[offBuf + 1],
1020 pbBuf[offBuf + 2],
1021 pbBuf[offBuf + 3]);
1022# endif
1023 }
1024# else
1025 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1026 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
1027 {
1028 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
1029# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1030 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1031# else
1032 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1033 pVCpu->iem.s.abOpcode[offOpcode + 1],
1034 pVCpu->iem.s.abOpcode[offOpcode + 2],
1035 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1036# endif
1037 }
1038# endif
1039 return iemOpcodeGetNextU32SlowJmp(pVCpu);
1040}
1041
1042#endif /* IEM_WITH_SETJMP */
1043
1044/**
1045 * Fetches the next opcode dword, returns automatically on failure.
1046 *
1047 * @param a_pu32 Where to return the opcode dword.
1048 * @remark Implicitly references pVCpu.
1049 */
1050#ifndef IEM_WITH_SETJMP
1051# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1052 do \
1053 { \
1054 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
1055 if (rcStrict2 != VINF_SUCCESS) \
1056 return rcStrict2; \
1057 } while (0)
1058#else
1059# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
1060#endif
1061
1062#ifndef IEM_WITH_SETJMP
1063/**
1064 * Fetches the next opcode dword, zero extending it to a quad word.
1065 *
1066 * @returns Strict VBox status code.
1067 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1068 * @param pu64 Where to return the opcode quad word.
1069 */
1070DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1071{
1072 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1073 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
1074 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
1075
1076 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1077 pVCpu->iem.s.abOpcode[offOpcode + 1],
1078 pVCpu->iem.s.abOpcode[offOpcode + 2],
1079 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1080 pVCpu->iem.s.offOpcode = offOpcode + 4;
1081 return VINF_SUCCESS;
1082}
1083#endif /* !IEM_WITH_SETJMP */
1084
1085/**
1086 * Fetches the next opcode dword and zero extends it to a quad word, returns
1087 * automatically on failure.
1088 *
1089 * @param a_pu64 Where to return the opcode quad word.
1090 * @remark Implicitly references pVCpu.
1091 */
1092#ifndef IEM_WITH_SETJMP
1093# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1094 do \
1095 { \
1096 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
1097 if (rcStrict2 != VINF_SUCCESS) \
1098 return rcStrict2; \
1099 } while (0)
1100#else
1101# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
1102#endif
1103
1104
1105#ifndef IEM_WITH_SETJMP
1106/**
1107 * Fetches the next signed double word from the opcode stream.
1108 *
1109 * @returns Strict VBox status code.
1110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1111 * @param pi32 Where to return the signed double word.
1112 */
1113DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32) RT_NOEXCEPT
1114{
1115 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
1116}
1117#endif
1118
1119/**
1120 * Fetches the next signed double word from the opcode stream, returning
1121 * automatically on failure.
1122 *
1123 * @param a_pi32 Where to return the signed double word.
1124 * @remark Implicitly references pVCpu.
1125 */
1126#ifndef IEM_WITH_SETJMP
1127# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1128 do \
1129 { \
1130 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
1131 if (rcStrict2 != VINF_SUCCESS) \
1132 return rcStrict2; \
1133 } while (0)
1134#else
1135# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
1136#endif
1137
1138#ifndef IEM_WITH_SETJMP
1139/**
1140 * Fetches the next opcode dword, sign extending it into a quad word.
1141 *
1142 * @returns Strict VBox status code.
1143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1144 * @param pu64 Where to return the opcode quad word.
1145 */
1146DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1147{
1148 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1149 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
1150 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
1151
1152 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1153 pVCpu->iem.s.abOpcode[offOpcode + 1],
1154 pVCpu->iem.s.abOpcode[offOpcode + 2],
1155 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1156 *pu64 = i32;
1157 pVCpu->iem.s.offOpcode = offOpcode + 4;
1158 return VINF_SUCCESS;
1159}
1160#endif /* !IEM_WITH_SETJMP */
1161
1162/**
1163 * Fetches the next opcode double word and sign extends it to a quad word,
1164 * returns automatically on failure.
1165 *
1166 * @param a_pu64 Where to return the opcode quad word.
1167 * @remark Implicitly references pVCpu.
1168 */
1169#ifndef IEM_WITH_SETJMP
1170# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1171 do \
1172 { \
1173 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
1174 if (rcStrict2 != VINF_SUCCESS) \
1175 return rcStrict2; \
1176 } while (0)
1177#else
1178# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
1179#endif
1180
1181#ifndef IEM_WITH_SETJMP
1182
1183/**
1184 * Fetches the next opcode qword.
1185 *
1186 * @returns Strict VBox status code.
1187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1188 * @param pu64 Where to return the opcode qword.
1189 */
1190DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1191{
1192 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1193 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
1194 {
1195# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1196 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1197# else
1198 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1199 pVCpu->iem.s.abOpcode[offOpcode + 1],
1200 pVCpu->iem.s.abOpcode[offOpcode + 2],
1201 pVCpu->iem.s.abOpcode[offOpcode + 3],
1202 pVCpu->iem.s.abOpcode[offOpcode + 4],
1203 pVCpu->iem.s.abOpcode[offOpcode + 5],
1204 pVCpu->iem.s.abOpcode[offOpcode + 6],
1205 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1206# endif
1207 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
1208 return VINF_SUCCESS;
1209 }
1210 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
1211}
1212
1213#else /* IEM_WITH_SETJMP */
1214
1215/**
1216 * Fetches the next opcode qword, longjmp on error.
1217 *
1218 * @returns The opcode qword.
1219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1220 */
1221DECL_INLINE_THROW(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1222{
1223# ifdef IEM_WITH_CODE_TLB
1224 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1225 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1226 if (RT_LIKELY( pbBuf != NULL
1227 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
1228 {
1229 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
1230# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1231 return *(uint64_t const *)&pbBuf[offBuf];
1232# else
1233 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
1234 pbBuf[offBuf + 1],
1235 pbBuf[offBuf + 2],
1236 pbBuf[offBuf + 3],
1237 pbBuf[offBuf + 4],
1238 pbBuf[offBuf + 5],
1239 pbBuf[offBuf + 6],
1240 pbBuf[offBuf + 7]);
1241# endif
1242 }
1243# else
1244 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1245 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
1246 {
1247 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
1248# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1249 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1250# else
1251 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1252 pVCpu->iem.s.abOpcode[offOpcode + 1],
1253 pVCpu->iem.s.abOpcode[offOpcode + 2],
1254 pVCpu->iem.s.abOpcode[offOpcode + 3],
1255 pVCpu->iem.s.abOpcode[offOpcode + 4],
1256 pVCpu->iem.s.abOpcode[offOpcode + 5],
1257 pVCpu->iem.s.abOpcode[offOpcode + 6],
1258 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1259# endif
1260 }
1261# endif
1262 return iemOpcodeGetNextU64SlowJmp(pVCpu);
1263}
1264
1265#endif /* IEM_WITH_SETJMP */
1266
1267/**
1268 * Fetches the next opcode quad word, returns automatically on failure.
1269 *
1270 * @param a_pu64 Where to return the opcode quad word.
1271 * @remark Implicitly references pVCpu.
1272 */
1273#ifndef IEM_WITH_SETJMP
1274# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1275 do \
1276 { \
1277 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
1278 if (rcStrict2 != VINF_SUCCESS) \
1279 return rcStrict2; \
1280 } while (0)
1281#else
1282# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
1283#endif
1284
1285
1286/** @name Misc Worker Functions.
1287 * @{
1288 */
1289
1290/**
1291 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1292 * not (kind of obsolete now).
1293 *
1294 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1295 */
1296#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
1297
1298/**
1299 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
1300 *
1301 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1302 * @param a_fEfl The new EFLAGS.
1303 */
1304#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
1305
1306
1307/**
1308 * Loads a NULL data selector into a selector register, both the hidden and
1309 * visible parts, in protected mode.
1310 *
1311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1312 * @param pSReg Pointer to the segment register.
1313 * @param uRpl The RPL.
1314 */
1315DECLINLINE(void) iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl) RT_NOEXCEPT
1316{
1317 /** @todo Testcase: write a testcase checking what happens when loading a NULL
1318 * data selector in protected mode. */
1319 pSReg->Sel = uRpl;
1320 pSReg->ValidSel = uRpl;
1321 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1322 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1323 {
1324 /* VT-x (Intel 3960x) observed doing something like this. */
1325 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
1326 pSReg->u32Limit = UINT32_MAX;
1327 pSReg->u64Base = 0;
1328 }
1329 else
1330 {
1331 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
1332 pSReg->u32Limit = 0;
1333 pSReg->u64Base = 0;
1334 }
1335}
1336
1337/** @} */
1338
1339
1340/*
1341 *
1342 * Helper routines.
1343 * Helper routines.
1344 * Helper routines.
1345 *
1346 */
1347
1348/**
1349 * Recalculates the effective operand size.
1350 *
1351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1352 */
1353DECLINLINE(void) iemRecalEffOpSize(PVMCPUCC pVCpu) RT_NOEXCEPT
1354{
1355 switch (pVCpu->iem.s.enmCpuMode)
1356 {
1357 case IEMMODE_16BIT:
1358 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
1359 break;
1360 case IEMMODE_32BIT:
1361 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
1362 break;
1363 case IEMMODE_64BIT:
1364 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
1365 {
1366 case 0:
1367 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
1368 break;
1369 case IEM_OP_PRF_SIZE_OP:
1370 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1371 break;
1372 case IEM_OP_PRF_SIZE_REX_W:
1373 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
1374 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1375 break;
1376 }
1377 break;
1378 default:
1379 AssertFailed();
1380 }
1381}
1382
1383
1384/**
1385 * Sets the default operand size to 64-bit and recalculates the effective
1386 * operand size.
1387 *
1388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1389 */
1390DECLINLINE(void) iemRecalEffOpSize64Default(PVMCPUCC pVCpu) RT_NOEXCEPT
1391{
1392 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
1393 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
1394 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
1395 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1396 else
1397 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1398}
1399
1400
1401/**
1402 * Sets the default operand size to 64-bit and recalculates the effective
1403 * operand size, with Intel ignoring any operand size prefix (AMD respects it).
1404 *
1405 * This is for the relative jumps.
1406 *
1407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1408 */
1409DECLINLINE(void) iemRecalEffOpSize64DefaultAndIntelIgnoresOpSizePrefix(PVMCPUCC pVCpu) RT_NOEXCEPT
1410{
1411 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
1412 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
1413 if ( (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP
1414 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
1415 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1416 else
1417 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1418}
1419
1420
1421
1422
1423/** @name Register Access.
1424 * @{
1425 */
1426
1427/**
1428 * Gets a reference (pointer) to the specified hidden segment register.
1429 *
1430 * @returns Hidden register reference.
1431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1432 * @param iSegReg The segment register.
1433 */
1434DECLINLINE(PCPUMSELREG) iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1435{
1436 Assert(iSegReg < X86_SREG_COUNT);
1437 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1438 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1439
1440 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
1441 return pSReg;
1442}
1443
1444
1445/**
1446 * Ensures that the given hidden segment register is up to date.
1447 *
1448 * @returns Hidden register reference.
1449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1450 * @param pSReg The segment register.
1451 */
1452DECLINLINE(PCPUMSELREG) iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg) RT_NOEXCEPT
1453{
1454 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
1455 NOREF(pVCpu);
1456 return pSReg;
1457}
1458
1459
1460/**
1461 * Gets a reference (pointer) to the specified segment register (the selector
1462 * value).
1463 *
1464 * @returns Pointer to the selector variable.
1465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1466 * @param iSegReg The segment register.
1467 */
1468DECLINLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1469{
1470 Assert(iSegReg < X86_SREG_COUNT);
1471 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1472 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
1473}
1474
1475
1476/**
1477 * Fetches the selector value of a segment register.
1478 *
1479 * @returns The selector value.
1480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1481 * @param iSegReg The segment register.
1482 */
1483DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1484{
1485 Assert(iSegReg < X86_SREG_COUNT);
1486 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1487 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
1488}
1489
1490
1491/**
1492 * Fetches the base address value of a segment register.
1493 *
1494 * @returns The segment base address.
1495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1496 * @param iSegReg The segment register.
1497 */
1498DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1499{
1500 Assert(iSegReg < X86_SREG_COUNT);
1501 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1502 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
1503}
1504
1505
1506/**
1507 * Gets a reference (pointer) to the specified general purpose register.
1508 *
1509 * @returns Register reference.
1510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1511 * @param iReg The general purpose register.
1512 */
1513DECLINLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1514{
1515 Assert(iReg < 16);
1516 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
1517}
1518
1519
1520/**
1521 * Gets a reference (pointer) to the specified 8-bit general purpose register.
1522 *
1523 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
1524 *
1525 * @returns Register reference.
1526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1527 * @param iReg The register.
1528 */
1529DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1530{
1531 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
1532 {
1533 Assert(iReg < 16);
1534 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
1535 }
1536 /* high 8-bit register. */
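 /* Without a REX prefix, encodings 4-7 select AH/CH/DH/BH, i.e. bits 15:8 of
 RAX/RCX/RDX/RBX, hence the (iReg & 3) index and the .bHi member below. */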
1537 Assert(iReg < 8);
1538 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
1539}
1540
1541
1542/**
1543 * Gets a reference (pointer) to the specified 16-bit general purpose register.
1544 *
1545 * @returns Register reference.
1546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1547 * @param iReg The register.
1548 */
1549DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1550{
1551 Assert(iReg < 16);
1552 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
1553}
1554
1555
1556/**
1557 * Gets a reference (pointer) to the specified 32-bit general purpose register.
1558 *
1559 * @returns Register reference.
1560 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1561 * @param iReg The register.
1562 */
1563DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1564{
1565 Assert(iReg < 16);
1566 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1567}
1568
1569
1570/**
1571 * Gets a reference (pointer) to the specified signed 32-bit general purpose register.
1572 *
1573 * @returns Register reference.
1574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1575 * @param iReg The register.
1576 */
1577DECLINLINE(int32_t *) iemGRegRefI32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1578{
1579 Assert(iReg < 16);
1580 return (int32_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1581}
1582
1583
1584/**
1585 * Gets a reference (pointer) to the specified 64-bit general purpose register.
1586 *
1587 * @returns Register reference.
1588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1589 * @param iReg The register.
1590 */
1591DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1592{
1593 Assert(iReg < 16);
1594 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1595}
1596
1597
1598/**
1599 * Gets a reference (pointer) to the specified signed 64-bit general purpose register.
1600 *
1601 * @returns Register reference.
1602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1603 * @param iReg The register.
1604 */
1605DECLINLINE(int64_t *) iemGRegRefI64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1606{
1607 Assert(iReg < 16);
1608 return (int64_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1609}
1610
1611
1612/**
1613 * Gets a reference (pointer) to the specified segment register's base address.
1614 *
1615 * @returns Segment register base address reference.
1616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1617 * @param iSegReg The segment selector.
1618 */
1619DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1620{
1621 Assert(iSegReg < X86_SREG_COUNT);
1622 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1623 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
1624}
1625
1626
1627/**
1628 * Fetches the value of an 8-bit general purpose register.
1629 *
1630 * @returns The register value.
1631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1632 * @param iReg The register.
1633 */
1634DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1635{
1636 return *iemGRegRefU8(pVCpu, iReg);
1637}
1638
1639
1640/**
1641 * Fetches the value of a 16-bit general purpose register.
1642 *
1643 * @returns The register value.
1644 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1645 * @param iReg The register.
1646 */
1647DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1648{
1649 Assert(iReg < 16);
1650 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
1651}
1652
1653
1654/**
1655 * Fetches the value of a 32-bit general purpose register.
1656 *
1657 * @returns The register value.
1658 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1659 * @param iReg The register.
1660 */
1661DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1662{
1663 Assert(iReg < 16);
1664 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1665}
1666
1667
1668/**
1669 * Fetches the value of a 64-bit general purpose register.
1670 *
1671 * @returns The register value.
1672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1673 * @param iReg The register.
1674 */
1675DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1676{
1677 Assert(iReg < 16);
1678 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1679}
1680
1681
1682/**
1683 * Get the address of the top of the stack.
1684 *
1685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1686 */
1687DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu) RT_NOEXCEPT
1688{
1689 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1690 return pVCpu->cpum.GstCtx.rsp;
1691 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1692 return pVCpu->cpum.GstCtx.esp;
1693 return pVCpu->cpum.GstCtx.sp;
1694}
1695
1696
1697/**
1698 * Updates the RIP/EIP/IP to point to the next instruction.
1699 *
1700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1701 * @param cbInstr The number of bytes to add.
1702 */
1703DECL_FORCE_INLINE(void) iemRegAddToRip(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
1704{
1705 /*
1706 * Advance RIP.
1707 *
1708 * When we're targeting 8086/8, 80186/8 or 80286 mode the updates are 16-bit,
1709 * while in all other modes except LM64 the updates are 32-bit. This means
1710 * we need to watch for both 32-bit and 16-bit "carry" situations, i.e.
1711 * 4GB and 64KB rollovers, and decide whether anything needs masking.
1712 *
1713 * See PC wrap around tests in bs3-cpu-weird-1.
1714 */
1715 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
1716 uint64_t const uRipNext = uRipPrev + cbInstr;
1717 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & (RT_BIT_64(32) | RT_BIT_64(16)))
1718 || pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT))
1719 pVCpu->cpum.GstCtx.rip = uRipNext;
1720 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
1721 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
1722 else
1723 pVCpu->cpum.GstCtx.rip = (uint16_t)uRipNext;
1724}
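/*
 * Worked example (illustrative only, not from the original source): with an
 * 80286 target, IP=0xFFFE and a 3-byte instruction, uRipNext = 0x10001 and
 * uRipPrev ^ uRipNext has bit 16 set, so the unlikely path is taken and the
 * result is truncated to 16 bits - IP wraps around to 0x0001.
 */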
1725
1726
1727/**
1728 * Called by iemRegAddToRipAndFinishingClearingRF and others when any of the
1729 * following EFLAGS bits are set:
1730 * - X86_EFL_RF - clear it.
1731 * - CPUMCTX_INHIBIT_SHADOW (_SS/_STI) - clear them.
1732 * - X86_EFL_TF - generate single step \#DB trap.
1733 * - CPUMCTX_DBG_HIT_DR0/1/2/3 - generate \#DB trap (data or I/O, not
1734 * instruction).
1735 *
1736 * According to @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events},
1737 * a \#DB due to TF (single stepping) or a DRx non-instruction breakpoint
1738 * takes priority over both NMIs and hardware interrupts. So, neither is
1739 * considered here. (The RESET, \#MC, SMI, INIT, STOPCLK and FLUSH events are
1740 * either unsupported or will be triggered on top of any \#DB raised here.)
1741 *
1742 * The RF flag only needs to be cleared here as it only suppresses instruction
1743 * breakpoints, which are not raised here (that happens synchronously during
1744 * instruction fetching).
1745 *
1746 * The CPUMCTX_INHIBIT_SHADOW_SS flag will be cleared by this function, so its
1747 * status has no bearing on whether \#DB exceptions are raised.
1748 *
1749 * @note This must *NOT* be called by the two instructions setting the
1750 * CPUMCTX_INHIBIT_SHADOW_SS flag.
1751 *
1752 * @see @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events}
1753 * @see @sdmv3{077,200,6.8.3,Masking Exceptions and Interrupts When Switching
1754 * Stacks}
1755 */
1756static VBOXSTRICTRC iemFinishInstructionWithFlagsSet(PVMCPUCC pVCpu) RT_NOEXCEPT
1757{
1758 /*
1759 * Normally we're just here to clear RF and/or interrupt shadow bits.
1760 */
1761 if (RT_LIKELY((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) == 0))
1762 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
1763 else
1764 {
1765 /*
1766 * Raise a #DB or/and DBGF event.
1767 */
1768 VBOXSTRICTRC rcStrict;
1769 if (pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK))
1770 {
1771 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
1772 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
1773 if (pVCpu->cpum.GstCtx.eflags.uBoth & X86_EFL_TF)
1774 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS;
1775 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK) >> CPUMCTX_DBG_HIT_DRX_SHIFT;
1776 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64\n",
1777 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
1778 pVCpu->cpum.GstCtx.rflags.uBoth));
1779
1780 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK);
1781 rcStrict = iemRaiseDebugException(pVCpu);
1782
1783 /* A DBGF event/breakpoint trumps the iemRaiseDebugException informational status code. */
1784 if ((pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK) && RT_FAILURE(rcStrict))
1785 {
1786 rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
1787 LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
1788 }
1789 }
1790 else
1791 {
1792 Assert(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK);
1793 rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
1794 LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
1795 }
1796 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_DBG_DBGF_MASK;
1797 return rcStrict;
1798 }
1799 return VINF_SUCCESS;
1800}
1801
1802
1803/**
1804 * Clears the RF and CPUMCTX_INHIBIT_SHADOW, triggering \#DB if pending.
1805 *
1806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1807 */
1808DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegFinishClearingRF(PVMCPUCC pVCpu) RT_NOEXCEPT
1809{
1810 /*
1811 * We assume that most of the time nothing actually needs doing here.
1812 */
1813 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
1814 if (RT_LIKELY(!( pVCpu->cpum.GstCtx.eflags.uBoth
1815 & (X86_EFL_TF | X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) ))
1816 return VINF_SUCCESS;
1817 return iemFinishInstructionWithFlagsSet(pVCpu);
1818}
1819
1820
1821/**
1822 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF
1823 * and CPUMCTX_INHIBIT_SHADOW.
1824 *
1825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1826 * @param cbInstr The number of bytes to add.
1827 */
1828DECLINLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
1829{
1830 iemRegAddToRip(pVCpu, cbInstr);
1831 return iemRegFinishClearingRF(pVCpu);
1832}
1833
1834
1835/**
1836 * Extended version of iemFinishInstructionWithFlagsSet that goes with
1837 * iemRegAddToRipAndFinishingClearingRfEx.
1838 *
1839 * See iemFinishInstructionWithFlagsSet() for details.
1840 */
1841static VBOXSTRICTRC iemFinishInstructionWithTfSet(PVMCPUCC pVCpu) RT_NOEXCEPT
1842{
1843 /*
1844 * Raise a #DB.
1845 */
1846 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
1847 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
1848 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS
1849 | (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK) >> CPUMCTX_DBG_HIT_DRX_SHIFT;
1850 /** @todo Do we set all pending \#DB events, or just one? */
1851 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64 (popf)\n",
1852 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
1853 pVCpu->cpum.GstCtx.rflags.uBoth));
1854 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
1855 return iemRaiseDebugException(pVCpu);
1856}
1857
1858
1859/**
1860 * Extended version of iemRegAddToRipAndFinishingClearingRF for use by POPF and
1861 * others potentially updating EFLAGS.TF.
1862 *
1863 * The single step event must be generated using the TF value at the start of
1864 * the instruction, not the new value set by it.
1865 *
1866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1867 * @param cbInstr The number of bytes to add.
1868 * @param fEflOld The EFLAGS at the start of the instruction
1869 * execution.
1870 */
1871DECLINLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRfEx(PVMCPUCC pVCpu, uint8_t cbInstr, uint32_t fEflOld) RT_NOEXCEPT
1872{
1873 iemRegAddToRip(pVCpu, cbInstr);
1874 if (!(fEflOld & X86_EFL_TF))
1875 return iemRegFinishClearingRF(pVCpu);
1876 return iemFinishInstructionWithTfSet(pVCpu);
1877}
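/*
 * Illustrative note: a POPF that clears TF still raises the single-step #DB
 * via iemFinishInstructionWithTfSet above, because fEflOld - captured before
 * the instruction executed - had TF set.
 */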
1878
1879
1880/**
1881 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
1882 *
1883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1884 */
1885DECLINLINE(VBOXSTRICTRC) iemRegUpdateRipAndFinishClearingRF(PVMCPUCC pVCpu) RT_NOEXCEPT
1886{
1887 return iemRegAddToRipAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
1888}
1889
1890
1891/**
1892 * Adds to the stack pointer.
1893 *
1894 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1895 * @param cbToAdd The number of bytes to add (8-bit!).
1896 */
1897DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd) RT_NOEXCEPT
1898{
1899 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1900 pVCpu->cpum.GstCtx.rsp += cbToAdd;
1901 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1902 pVCpu->cpum.GstCtx.esp += cbToAdd;
1903 else
1904 pVCpu->cpum.GstCtx.sp += cbToAdd;
1905}
1906
1907
1908/**
1909 * Subtracts from the stack pointer.
1910 *
1911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1912 * @param cbToSub The number of bytes to subtract (8-bit!).
1913 */
1914DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub) RT_NOEXCEPT
1915{
1916 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1917 pVCpu->cpum.GstCtx.rsp -= cbToSub;
1918 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1919 pVCpu->cpum.GstCtx.esp -= cbToSub;
1920 else
1921 pVCpu->cpum.GstCtx.sp -= cbToSub;
1922}
1923
1924
1925/**
1926 * Adds to the temporary stack pointer.
1927 *
1928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1929 * @param pTmpRsp The temporary SP/ESP/RSP to update.
1930 * @param cbToAdd The number of bytes to add (16-bit).
1931 */
1932DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd) RT_NOEXCEPT
1933{
1934 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1935 pTmpRsp->u += cbToAdd;
1936 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1937 pTmpRsp->DWords.dw0 += cbToAdd;
1938 else
1939 pTmpRsp->Words.w0 += cbToAdd;
1940}
1941
1942
1943/**
1944 * Subtracts from the temporary stack pointer.
1945 *
1946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1947 * @param pTmpRsp The temporary SP/ESP/RSP to update.
1948 * @param cbToSub The number of bytes to subtract.
1949 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
1950 * expecting that.
1951 */
1952DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub) RT_NOEXCEPT
1953{
1954 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1955 pTmpRsp->u -= cbToSub;
1956 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1957 pTmpRsp->DWords.dw0 -= cbToSub;
1958 else
1959 pTmpRsp->Words.w0 -= cbToSub;
1960}
1961
1962
1963/**
1964 * Calculates the effective stack address for a push of the specified size as
1965 * well as the new RSP value (upper bits may be masked).
1966 *
1967 * @returns Effective stack address for the push.
1968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1969 * @param cbItem The size of the stack item to push.
1970 * @param puNewRsp Where to return the new RSP value.
1971 */
1972DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
1973{
1974 RTUINT64U uTmpRsp;
1975 RTGCPTR GCPtrTop;
1976 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
1977
1978 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1979 GCPtrTop = uTmpRsp.u -= cbItem;
1980 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1981 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
1982 else
1983 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
1984 *puNewRsp = uTmpRsp.u;
1985 return GCPtrTop;
1986}
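/**
 * Minimal usage sketch (illustration only, not part of the IEM API): shows how
 * the pair of values produced by iemRegGetRspForPush is meant to be consumed -
 * derive the effective SS-relative address first, perform the store, and only
 * then commit the masked stack pointer.  The function name is invented for
 * this example; the real stack helpers also do the memory access and fault
 * checking.
 *
 * @returns Effective stack address that was used for the push.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param cbItem The size of the stack item to push.
 */
DECLINLINE(RTGCPTR) iemExampleCommitRspForPush(PVMCPUCC pVCpu, uint8_t cbItem) RT_NOEXCEPT
{
    uint64_t      uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, cbItem, &uNewRsp); /* effective address + new RSP */
    /* ... the caller would write cbItem bytes at SS:GCPtrTop here ... */
    pVCpu->cpum.GstCtx.rsp = uNewRsp;   /* commit the (possibly 16/32-bit masked) stack pointer */
    return GCPtrTop;
}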
1987
1988
1989/**
1990 * Gets the current stack pointer and calculates the value after a pop of the
1991 * specified size.
1992 *
1993 * @returns Current stack pointer.
1994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1995 * @param cbItem The size of the stack item to pop.
1996 * @param puNewRsp Where to return the new RSP value.
1997 */
1998DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
1999{
2000 RTUINT64U uTmpRsp;
2001 RTGCPTR GCPtrTop;
2002 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
2003
2004 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2005 {
2006 GCPtrTop = uTmpRsp.u;
2007 uTmpRsp.u += cbItem;
2008 }
2009 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2010 {
2011 GCPtrTop = uTmpRsp.DWords.dw0;
2012 uTmpRsp.DWords.dw0 += cbItem;
2013 }
2014 else
2015 {
2016 GCPtrTop = uTmpRsp.Words.w0;
2017 uTmpRsp.Words.w0 += cbItem;
2018 }
2019 *puNewRsp = uTmpRsp.u;
2020 return GCPtrTop;
2021}
2022
2023
2024/**
2025 * Calculates the effective stack address for a push of the specified size as
2026 * well as the new temporary RSP value (upper bits may be masked).
2027 *
2028 * @returns Effective stack address for the push.
2029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2030 * @param pTmpRsp The temporary stack pointer. This is updated.
2031 * @param cbItem The size of the stack item to push.
2032 */
2033DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
2034{
2035 RTGCPTR GCPtrTop;
2036
2037 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2038 GCPtrTop = pTmpRsp->u -= cbItem;
2039 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2040 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
2041 else
2042 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
2043 return GCPtrTop;
2044}
2045
2046
2047/**
2048 * Gets the effective stack address for a pop of the specified size and
2049 * calculates and updates the temporary RSP.
2050 *
2051 * @returns Current stack pointer.
2052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2053 * @param pTmpRsp The temporary stack pointer. This is updated.
2054 * @param cbItem The size of the stack item to pop.
2055 */
2056DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
2057{
2058 RTGCPTR GCPtrTop;
2059 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2060 {
2061 GCPtrTop = pTmpRsp->u;
2062 pTmpRsp->u += cbItem;
2063 }
2064 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2065 {
2066 GCPtrTop = pTmpRsp->DWords.dw0;
2067 pTmpRsp->DWords.dw0 += cbItem;
2068 }
2069 else
2070 {
2071 GCPtrTop = pTmpRsp->Words.w0;
2072 pTmpRsp->Words.w0 += cbItem;
2073 }
2074 return GCPtrTop;
2075}
2076
2077/** @} */
2078
2079
2080/** @name FPU access and helpers.
2081 *
2082 * @{
2083 */
2084
2085
2086/**
2087 * Hook for preparing to use the host FPU.
2088 *
2089 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2090 *
2091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2092 */
2093DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu) RT_NOEXCEPT
2094{
2095#ifdef IN_RING3
2096 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2097#else
2098 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
2099#endif
2100 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2101}
2102
2103
2104/**
2105 * Hook for preparing to use the host FPU for SSE.
2106 *
2107 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2108 *
2109 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2110 */
2111DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu) RT_NOEXCEPT
2112{
2113 iemFpuPrepareUsage(pVCpu);
2114}
2115
2116
2117/**
2118 * Hook for preparing to use the host FPU for AVX.
2119 *
2120 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2121 *
2122 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2123 */
2124DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu) RT_NOEXCEPT
2125{
2126 iemFpuPrepareUsage(pVCpu);
2127}
2128
2129
2130/**
2131 * Hook for actualizing the guest FPU state before the interpreter reads it.
2132 *
2133 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2134 *
2135 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2136 */
2137DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2138{
2139#ifdef IN_RING3
2140 NOREF(pVCpu);
2141#else
2142 CPUMRZFpuStateActualizeForRead(pVCpu);
2143#endif
2144 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2145}
2146
2147
2148/**
2149 * Hook for actualizing the guest FPU state before the interpreter changes it.
2150 *
2151 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2152 *
2153 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2154 */
2155DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2156{
2157#ifdef IN_RING3
2158 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2159#else
2160 CPUMRZFpuStateActualizeForChange(pVCpu);
2161#endif
2162 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2163}
2164
2165
2166/**
2167 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
2168 * only.
2169 *
2170 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2171 *
2172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2173 */
2174DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2175{
2176#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
2177 NOREF(pVCpu);
2178#else
2179 CPUMRZFpuStateActualizeSseForRead(pVCpu);
2180#endif
2181 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2182}
2183
2184
2185/**
2186 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
2187 * read+write.
2188 *
2189 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2190 *
2191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2192 */
2193DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2194{
2195#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
2196 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2197#else
2198 CPUMRZFpuStateActualizeForChange(pVCpu);
2199#endif
2200 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2201
2202 /* Make sure any changes are loaded the next time around. */
2203 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_SSE;
2204}
2205
2206
2207/**
2208 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
2209 * only.
2210 *
2211 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2212 *
2213 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2214 */
2215DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2216{
2217#ifdef IN_RING3
2218 NOREF(pVCpu);
2219#else
2220 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
2221#endif
2222 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2223}
2224
2225
2226/**
2227 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
2228 * read+write.
2229 *
2230 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2231 *
2232 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2233 */
2234DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2235{
2236#ifdef IN_RING3
2237 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2238#else
2239 CPUMRZFpuStateActualizeForChange(pVCpu);
2240#endif
2241 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2242
2243 /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
2244 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
2245}
2246
2247
2248/**
2249 * Stores a QNaN value into a FPU register.
2250 *
2251 * @param pReg Pointer to the register.
2252 */
2253DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg) RT_NOEXCEPT
2254{
2255 pReg->au32[0] = UINT32_C(0x00000000);
2256 pReg->au32[1] = UINT32_C(0xc0000000);
2257 pReg->au16[4] = UINT16_C(0xffff);
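    /* Combined, this is the 80-bit "real indefinite" QNaN: sign=1, exponent=0x7FFF, mantissa=0xC000000000000000. */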
2258}
2259
2260
2261/**
2262 * Updates the FOP, FPU.CS and FPUIP registers.
2263 *
2264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2265 * @param pFpuCtx The FPU context.
2266 */
2267DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
2268{
2269 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
2270 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
2271 /** @todo x87.CS and FPUIP need to be kept separately. */
2272 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2273 {
2274 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
2275 * happens in real mode here based on the fnsave and fnstenv images. */
2276 pFpuCtx->CS = 0;
2277 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
2278 }
2279 else if (!IEM_IS_LONG_MODE(pVCpu))
2280 {
2281 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
2282 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
2283 }
2284 else
2285 *(uint64_t *)&pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
2286}
2287
2288
2289
2290
2291
2292/**
2293 * Marks the specified stack register as free (for FFREE).
2294 *
2295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2296 * @param iStReg The register to free.
2297 */
2298DECLINLINE(void) iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
2299{
2300 Assert(iStReg < 8);
2301 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2302 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
2303 pFpuCtx->FTW &= ~RT_BIT(iReg);
2304}
2305
2306
2307/**
2308 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
2309 *
2310 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2311 */
2312DECLINLINE(void) iemFpuStackIncTop(PVMCPUCC pVCpu) RT_NOEXCEPT
2313{
2314 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2315 uint16_t uFsw = pFpuCtx->FSW;
2316 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
2317 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
2318 uFsw &= ~X86_FSW_TOP_MASK;
2319 uFsw |= uTop;
2320 pFpuCtx->FSW = uFsw;
2321}
2322
2323
2324/**
2325 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
2326 *
2327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2328 */
2329DECLINLINE(void) iemFpuStackDecTop(PVMCPUCC pVCpu) RT_NOEXCEPT
2330{
2331 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2332 uint16_t uFsw = pFpuCtx->FSW;
2333 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
2334 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
2335 uFsw &= ~X86_FSW_TOP_MASK;
2336 uFsw |= uTop;
2337 pFpuCtx->FSW = uFsw;
2338}
2339
2340
2341
2342
2343DECLINLINE(int) iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
2344{
2345 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2346 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
2347 if (pFpuCtx->FTW & RT_BIT(iReg))
2348 return VINF_SUCCESS;
2349 return VERR_NOT_FOUND;
2350}
2351
2352
2353DECLINLINE(int) iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef) RT_NOEXCEPT
2354{
2355 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2356 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
2357 if (pFpuCtx->FTW & RT_BIT(iReg))
2358 {
2359 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
2360 return VINF_SUCCESS;
2361 }
2362 return VERR_NOT_FOUND;
2363}
2364
2365
2366DECLINLINE(int) iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
2367 uint8_t iStReg1, PCRTFLOAT80U *ppRef1) RT_NOEXCEPT
2368{
2369 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2370 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2371 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
2372 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
2373 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
2374 {
2375 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
2376 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
2377 return VINF_SUCCESS;
2378 }
2379 return VERR_NOT_FOUND;
2380}
2381
2382
2383DECLINLINE(int) iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1) RT_NOEXCEPT
2384{
2385 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2386 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2387 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
2388 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
2389 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
2390 {
2391 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
2392 return VINF_SUCCESS;
2393 }
2394 return VERR_NOT_FOUND;
2395}
2396
2397
2398/**
2399 * Rotates the stack registers when setting new TOS.
2400 *
2401 * @param pFpuCtx The FPU context.
2402 * @param iNewTop New TOS value.
2403 * @remarks We only do this to speed up fxsave/fxrstor which
2404 * arrange the FP registers in stack order.
2405 * MUST be done before writing the new TOS (FSW).
2406 */
2407DECLINLINE(void) iemFpuRotateStackSetTop(PX86FXSTATE pFpuCtx, uint16_t iNewTop) RT_NOEXCEPT
2408{
2409 uint16_t iOldTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2410 RTFLOAT80U ar80Temp[8];
2411
2412 if (iOldTop == iNewTop)
2413 return;
2414
2415 /* Unscrew the stack and get it into 'native' order. */
2416 ar80Temp[0] = pFpuCtx->aRegs[(8 - iOldTop + 0) & X86_FSW_TOP_SMASK].r80;
2417 ar80Temp[1] = pFpuCtx->aRegs[(8 - iOldTop + 1) & X86_FSW_TOP_SMASK].r80;
2418 ar80Temp[2] = pFpuCtx->aRegs[(8 - iOldTop + 2) & X86_FSW_TOP_SMASK].r80;
2419 ar80Temp[3] = pFpuCtx->aRegs[(8 - iOldTop + 3) & X86_FSW_TOP_SMASK].r80;
2420 ar80Temp[4] = pFpuCtx->aRegs[(8 - iOldTop + 4) & X86_FSW_TOP_SMASK].r80;
2421 ar80Temp[5] = pFpuCtx->aRegs[(8 - iOldTop + 5) & X86_FSW_TOP_SMASK].r80;
2422 ar80Temp[6] = pFpuCtx->aRegs[(8 - iOldTop + 6) & X86_FSW_TOP_SMASK].r80;
2423 ar80Temp[7] = pFpuCtx->aRegs[(8 - iOldTop + 7) & X86_FSW_TOP_SMASK].r80;
2424
2425 /* Now rotate the stack to the new position. */
2426 pFpuCtx->aRegs[0].r80 = ar80Temp[(iNewTop + 0) & X86_FSW_TOP_SMASK];
2427 pFpuCtx->aRegs[1].r80 = ar80Temp[(iNewTop + 1) & X86_FSW_TOP_SMASK];
2428 pFpuCtx->aRegs[2].r80 = ar80Temp[(iNewTop + 2) & X86_FSW_TOP_SMASK];
2429 pFpuCtx->aRegs[3].r80 = ar80Temp[(iNewTop + 3) & X86_FSW_TOP_SMASK];
2430 pFpuCtx->aRegs[4].r80 = ar80Temp[(iNewTop + 4) & X86_FSW_TOP_SMASK];
2431 pFpuCtx->aRegs[5].r80 = ar80Temp[(iNewTop + 5) & X86_FSW_TOP_SMASK];
2432 pFpuCtx->aRegs[6].r80 = ar80Temp[(iNewTop + 6) & X86_FSW_TOP_SMASK];
2433 pFpuCtx->aRegs[7].r80 = ar80Temp[(iNewTop + 7) & X86_FSW_TOP_SMASK];
2434}
2435
2436
2437/**
2438 * Updates the FPU exception status after FCW is changed.
2439 *
2440 * @param pFpuCtx The FPU context.
2441 */
2442DECLINLINE(void) iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
2443{
2444 uint16_t u16Fsw = pFpuCtx->FSW;
2445 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
2446 u16Fsw |= X86_FSW_ES | X86_FSW_B;
2447 else
2448 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
2449 pFpuCtx->FSW = u16Fsw;
2450}
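/*
 * Example (illustrative): if FSW.PE is already pending and the guest clears
 * FCW.PM (unmasking precision exceptions), the pending-but-unmasked exception
 * sets ES and B above; re-masking it clears both bits again.
 */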
2451
2452
2453/**
2454 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
2455 *
2456 * @returns The full FTW.
2457 * @param pFpuCtx The FPU context.
2458 */
2459DECLINLINE(uint16_t) iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx) RT_NOEXCEPT
2460{
2461 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
2462 uint16_t u16Ftw = 0;
2463 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2464 for (unsigned iSt = 0; iSt < 8; iSt++)
2465 {
2466 unsigned const iReg = (iSt + iTop) & 7;
2467 if (!(u8Ftw & RT_BIT(iReg)))
2468 u16Ftw |= 3 << (iReg * 2); /* empty */
2469 else
2470 {
2471 uint16_t uTag;
2472 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
2473 if (pr80Reg->s.uExponent == 0x7fff)
2474 uTag = 2; /* Exponent is all 1's => Special. */
2475 else if (pr80Reg->s.uExponent == 0x0000)
2476 {
2477 if (pr80Reg->s.uMantissa == 0x0000)
2478 uTag = 1; /* All bits are zero => Zero. */
2479 else
2480 uTag = 2; /* Must be special. */
2481 }
2482 else if (pr80Reg->s.uMantissa & RT_BIT_64(63)) /* The J bit. */
2483 uTag = 0; /* Valid. */
2484 else
2485 uTag = 2; /* Must be special. */
2486
2487 u16Ftw |= uTag << (iReg * 2);
2488 }
2489 }
2490
2491 return u16Ftw;
2492}
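/*
 * Worked example (illustrative): with TOP=6 and only ST(0)=1.0 and ST(1)=+0.0
 * in use, physical registers 6 and 7 get tags 00b (valid) and 01b (zero)
 * respectively, while every other two-bit field reads 11b (empty).
 */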
2493
2494
2495/**
2496 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
2497 *
2498 * @returns The compressed FTW.
2499 * @param u16FullFtw The full FTW to convert.
2500 */
2501DECLINLINE(uint16_t) iemFpuCompressFtw(uint16_t u16FullFtw) RT_NOEXCEPT
2502{
2503 uint8_t u8Ftw = 0;
2504 for (unsigned i = 0; i < 8; i++)
2505 {
2506 if ((u16FullFtw & 3) != 3 /*empty*/)
2507 u8Ftw |= RT_BIT(i);
2508 u16FullFtw >>= 2;
2509 }
2510
2511 return u8Ftw;
2512}
2513
2514/** @} */
2515
2516
2517/** @name Memory access.
2518 *
2519 * @{
2520 */
2521
2522
2523/**
2524 * Checks whether alignment checks are enabled or not.
2525 *
2526 * @returns true if enabled, false if not.
2527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2528 */
2529DECLINLINE(bool) iemMemAreAlignmentChecksEnabled(PVMCPUCC pVCpu) RT_NOEXCEPT
2530{
2531 AssertCompile(X86_CR0_AM == X86_EFL_AC);
2532 return pVCpu->iem.s.uCpl == 3
2533 && (((uint32_t)pVCpu->cpum.GstCtx.cr0 & pVCpu->cpum.GstCtx.eflags.u) & X86_CR0_AM);
2534}
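/*
 * Example (illustrative): alignment checking only fires when CPL=3 and both
 * CR0.AM and EFLAGS.AC are set - the AssertCompile above is what allows the
 * two to be tested with a single AND.  A misaligned dword access under those
 * conditions raises #AC; at CPL 0..2 or with either bit clear it does not.
 */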
2535
2536/**
2537 * Checks if the given segment can be written to, raising the appropriate
2538 * exception if not.
2539 *
2540 * @returns VBox strict status code.
2541 *
2542 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2543 * @param pHid Pointer to the hidden register.
2544 * @param iSegReg The register number.
2545 * @param pu64BaseAddr Where to return the base address to use for the
2546 * segment. (In 64-bit code it may differ from the
2547 * base in the hidden segment.)
2548 */
2549DECLINLINE(VBOXSTRICTRC) iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
2550 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
2551{
2552 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2553
2554 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2555 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
2556 else
2557 {
2558 if (!pHid->Attr.n.u1Present)
2559 {
2560 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
2561 AssertRelease(uSel == 0);
2562 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
2563 return iemRaiseGeneralProtectionFault0(pVCpu);
2564 }
2565
2566 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
2567 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
2568 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
2569 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
2570 *pu64BaseAddr = pHid->u64Base;
2571 }
2572 return VINF_SUCCESS;
2573}
2574
2575
2576/**
2577 * Checks if the given segment can be read from, raising the appropriate
2578 * exception if not.
2579 *
2580 * @returns VBox strict status code.
2581 *
2582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2583 * @param pHid Pointer to the hidden register.
2584 * @param iSegReg The register number.
2585 * @param pu64BaseAddr Where to return the base address to use for the
2586 * segment. (In 64-bit code it may differ from the
2587 * base in the hidden segment.)
2588 */
2589DECLINLINE(VBOXSTRICTRC) iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
2590 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
2591{
2592 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2593
2594 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2595 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
2596 else
2597 {
2598 if (!pHid->Attr.n.u1Present)
2599 {
2600 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
2601 AssertRelease(uSel == 0);
2602 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
2603 return iemRaiseGeneralProtectionFault0(pVCpu);
2604 }
2605
2606 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2607 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
2608 *pu64BaseAddr = pHid->u64Base;
2609 }
2610 return VINF_SUCCESS;
2611}
2612
2613
2614/**
2615 * Maps a physical page.
2616 *
2617 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
2618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2619 * @param GCPhysMem The physical address.
2620 * @param fAccess The intended access.
2621 * @param ppvMem Where to return the mapping address.
2622 * @param pLock The PGM lock.
2623 */
2624DECLINLINE(int) iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
2625 void **ppvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
2626{
2627#ifdef IEM_LOG_MEMORY_WRITES
2628 if (fAccess & IEM_ACCESS_TYPE_WRITE)
2629 return VERR_PGM_PHYS_TLB_CATCH_ALL;
2630#endif
2631
2632 /** @todo This API may require some improvement later. A private deal with PGM
2633 * regarding locking and unlocking needs to be struck. A couple of TLBs
2634 * living in PGM, but with publicly accessible inlined access methods
2635 * could perhaps be an even better solution. */
2636 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
2637 GCPhysMem,
2638 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
2639 pVCpu->iem.s.fBypassHandlers,
2640 ppvMem,
2641 pLock);
2642 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
2643 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
2644
2645 return rc;
2646}
2647
2648
2649/**
2650 * Unmap a page previously mapped by iemMemPageMap.
2651 *
2652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2653 * @param GCPhysMem The physical address.
2654 * @param fAccess The intended access.
2655 * @param pvMem What iemMemPageMap returned.
2656 * @param pLock The PGM lock.
2657 */
2658DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
2659 const void *pvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
2660{
2661 NOREF(pVCpu);
2662 NOREF(GCPhysMem);
2663 NOREF(fAccess);
2664 NOREF(pvMem);
2665 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
2666}
2667
2668#ifdef IEM_WITH_SETJMP
2669
2670/** @todo slim this down */
2671DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg,
2672 size_t cbMem, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
2673{
2674 Assert(cbMem >= 1);
2675 Assert(iSegReg < X86_SREG_COUNT);
2676
2677 /*
2678 * 64-bit mode is simpler.
2679 */
2680 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2681 {
2682 if (iSegReg >= X86_SREG_FS && iSegReg != UINT8_MAX)
2683 {
2684 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2685 PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
2686 GCPtrMem += pSel->u64Base;
2687 }
2688
2689 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
2690 return GCPtrMem;
2691 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
2692 }
2693 /*
2694 * 16-bit and 32-bit segmentation.
2695 */
2696 else if (iSegReg != UINT8_MAX)
2697 {
2698 /** @todo Does this apply to segments with 4G-1 limit? */
2699 uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
2700 if (RT_LIKELY(GCPtrLast32 >= (uint32_t)GCPtrMem))
2701 {
2702 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2703 PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
2704 switch (pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
2705 | X86_SEL_TYPE_READ | X86_SEL_TYPE_WRITE /* same as read */
2706 | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_CONF /* same as down */
2707 | X86_SEL_TYPE_CODE))
2708 {
2709 case X86DESCATTR_P: /* readonly data, expand up */
2710 case X86DESCATTR_P | X86_SEL_TYPE_WRITE: /* writable data, expand up */
2711 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ: /* code, read-only */
2712 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_CONF: /* conforming code, read-only */
2713 /* expand up */
2714 if (RT_LIKELY(GCPtrLast32 <= pSel->u32Limit))
2715 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
2716 Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x vs %#x\n",
2717 (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit));
2718 break;
2719
2720 case X86DESCATTR_P | X86_SEL_TYPE_DOWN: /* readonly data, expand down */
2721 case X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_WRITE: /* writable data, expand down */
2722 /* expand down */
2723 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
2724 && ( pSel->Attr.n.u1DefBig
2725 || GCPtrLast32 <= UINT32_C(0xffff)) ))
2726 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
2727 Log10(("iemMemApplySegmentToReadJmp: expand down out of bounds %#x..%#x vs %#x..%#x\n",
2728 (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit, pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT16_MAX));
2729 break;
2730
2731 default:
2732 Log10(("iemMemApplySegmentToReadJmp: bad selector %#x\n", pSel->Attr.u));
2733 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
2734 break;
2735 }
2736 }
2737 Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x\n",(uint32_t)GCPtrMem, GCPtrLast32));
2738 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
2739 }
2740 /*
2741 * 32-bit flat address.
2742 */
2743 else
2744 return GCPtrMem;
2745}
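/*
 * Example (illustrative): for an expand-down data segment with limit 0x1000
 * and a 16-bit default size, the valid range is 0x1001..0xFFFF, so a read at
 * 0x0800 fails the GCPtrMem > u32Limit test and lands in
 * iemRaiseSelectorBoundsJmp, while a read at 0x2000 is accepted.
 */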
2746
2747
2748/** @todo slim this down */
2749DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem,
2750 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
2751{
2752 Assert(cbMem >= 1);
2753 Assert(iSegReg < X86_SREG_COUNT);
2754
2755 /*
2756 * 64-bit mode is simpler.
2757 */
2758 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2759 {
2760 if (iSegReg >= X86_SREG_FS)
2761 {
2762 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2763 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
2764 GCPtrMem += pSel->u64Base;
2765 }
2766
2767 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
2768 return GCPtrMem;
2769 }
2770 /*
2771 * 16-bit and 32-bit segmentation.
2772 */
2773 else
2774 {
2775 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2776 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
2777 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
2778 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
2779 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
2780 {
2781 /* expand up */
2782 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
2783 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
2784 && GCPtrLast32 > (uint32_t)GCPtrMem))
2785 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
2786 }
2787 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
2788 {
2789 /* expand down */
2790 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
2791 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
2792 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2793 && GCPtrLast32 > (uint32_t)GCPtrMem))
2794 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
2795 }
2796 else
2797 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
2798 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
2799 }
2800 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
2801}
2802
2803#endif /* IEM_WITH_SETJMP */
2804
2805/**
2806 * Fakes a long mode stack selector for SS = 0.
2807 *
2808 * @param pDescSs Where to return the fake stack descriptor.
2809 * @param uDpl The DPL we want.
2810 */
2811DECLINLINE(void) iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl) RT_NOEXCEPT
2812{
2813 pDescSs->Long.au64[0] = 0;
2814 pDescSs->Long.au64[1] = 0;
2815 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2816 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
2817 pDescSs->Long.Gen.u2Dpl = uDpl;
2818 pDescSs->Long.Gen.u1Present = 1;
2819 pDescSs->Long.Gen.u1Long = 1;
2820}
2821
2822/** @} */
2823
2824
2825#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2826
2827/**
2828 * Gets CR0 fixed-0 bits in VMX non-root mode.
2829 *
2830 * We do this rather than fetching what we report to the guest (in the
2831 * IA32_VMX_CR0_FIXED0 MSR) because real hardware (and so do we) reports the same
2832 * values regardless of whether the unrestricted-guest feature is available on the CPU.
2833 *
2834 * @returns CR0 fixed-0 bits.
2835 * @param pVCpu The cross context virtual CPU structure.
2836 */
2837DECLINLINE(uint64_t) iemVmxGetCr0Fixed0(PCVMCPUCC pVCpu) RT_NOEXCEPT
2838{
2839 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
2840 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
2841
2842 static uint64_t const s_auCr0Fixed0[2] = { VMX_V_CR0_FIXED0, VMX_V_CR0_FIXED0_UX };
2843 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
2844 uint8_t const fUnrestrictedGuest = !!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
2845 uint64_t const uCr0Fixed0 = s_auCr0Fixed0[fUnrestrictedGuest];
2846 Assert(!(uCr0Fixed0 & (X86_CR0_NW | X86_CR0_CD)));
2847 return uCr0Fixed0;
2848}
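/*
 * Illustrative note (assumption about the VMX_V_CR0_FIXED0* constants): with
 * VMX_PROC_CTLS2_UNRESTRICTED_GUEST set in the current VMCS, the returned
 * fixed-0 set is expected to omit X86_CR0_PE and X86_CR0_PG, matching the SDM
 * rule that an unrestricted guest may run unpaged real/protected mode code.
 */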
2849
2850
2851/**
2852 * Sets virtual-APIC write emulation as pending.
2853 *
2854 * @param pVCpu The cross context virtual CPU structure.
2855 * @param offApic The offset in the virtual-APIC page that was written.
2856 */
2857DECLINLINE(void) iemVmxVirtApicSetPendingWrite(PVMCPUCC pVCpu, uint16_t offApic) RT_NOEXCEPT
2858{
2859 Assert(offApic < XAPIC_OFF_END + 4);
2860
2861 /*
2862 * Record the currently updated APIC offset, as we need this later for figuring
2863 * out whether to perform TPR, EOI or self-IPI virtualization, as well as for
2864 * supplying the exit qualification when causing an APIC-write VM-exit.
2865 */
2866 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offApic;
2867
2868 /*
2869 * Flag that we need to perform virtual-APIC write emulation (TPR/PPR/EOI/Self-IPI
2870 * virtualization or APIC-write emulation).
2871 */
2872 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
2873 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
2874}
2875
2876#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
2877
2878#endif /* !VMM_INCLUDED_SRC_include_IEMInline_h */