VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInline.h @ 97694

Last change on this file since 97694 was 97694, checked in by vboxsync, 2 years ago

VMM/IEM: Added support for hardware instruction breakpoints (DRx). Corrected some DR6 updating for single stepping. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 97.7 KB
Line 
1/* $Id: IEMInline.h 97694 2022-11-28 22:08:14Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Inlined Functions.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMInline_h
29#define VMM_INCLUDED_SRC_include_IEMInline_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34
35
36/**
37 * Makes status code adjustments (pass up from I/O and access handler)
38 * as well as maintaining statistics.
39 *
40 * @returns Strict VBox status code to pass up.
41 * @param pVCpu The cross context virtual CPU structure of the calling thread.
42 * @param rcStrict The status from executing an instruction.
43 */
44DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
45{
46 if (rcStrict != VINF_SUCCESS)
47 {
48 if (RT_SUCCESS(rcStrict))
49 {
50 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
51 || rcStrict == VINF_IOM_R3_IOPORT_READ
52 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
53 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
54 || rcStrict == VINF_IOM_R3_MMIO_READ
55 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
56 || rcStrict == VINF_IOM_R3_MMIO_WRITE
57 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
58 || rcStrict == VINF_CPUM_R3_MSR_READ
59 || rcStrict == VINF_CPUM_R3_MSR_WRITE
60 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
61 || rcStrict == VINF_EM_RAW_TO_R3
62 || rcStrict == VINF_EM_TRIPLE_FAULT
63 || rcStrict == VINF_GIM_R3_HYPERCALL
64 /* raw-mode / virt handlers only: */
65 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
66 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
67 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
68 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
69 || rcStrict == VINF_SELM_SYNC_GDT
70 || rcStrict == VINF_CSAM_PENDING_ACTION
71 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
72 /* nested hw.virt codes: */
73 || rcStrict == VINF_VMX_VMEXIT
74 || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
75 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
76 || rcStrict == VINF_SVM_VMEXIT
77 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
78/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
79 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
80#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
81 if ( rcStrict == VINF_VMX_VMEXIT
82 && rcPassUp == VINF_SUCCESS)
83 rcStrict = VINF_SUCCESS;
84 else
85#endif
86#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
87 if ( rcStrict == VINF_SVM_VMEXIT
88 && rcPassUp == VINF_SUCCESS)
89 rcStrict = VINF_SUCCESS;
90 else
91#endif
92 if (rcPassUp == VINF_SUCCESS)
93 pVCpu->iem.s.cRetInfStatuses++;
94 else if ( rcPassUp < VINF_EM_FIRST
95 || rcPassUp > VINF_EM_LAST
96 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
97 {
98 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
99 pVCpu->iem.s.cRetPassUpStatus++;
100 rcStrict = rcPassUp;
101 }
102 else
103 {
104 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
105 pVCpu->iem.s.cRetInfStatuses++;
106 }
107 }
108 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
109 pVCpu->iem.s.cRetAspectNotImplemented++;
110 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
111 pVCpu->iem.s.cRetInstrNotImplemented++;
112 else
113 pVCpu->iem.s.cRetErrStatuses++;
114 }
115 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
116 {
117 pVCpu->iem.s.cRetPassUpStatus++;
118 rcStrict = pVCpu->iem.s.rcPassUp;
119 }
120
121 return rcStrict;
122}
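/*
 * Illustrative call pattern (an editorial sketch, not taken from this file; the
 * worker name is hypothetical): outer IEMExecOne-style entry points typically
 * run the decoded instruction and then pass the result through this fiddling
 * routine before returning, so deferred rcPassUp codes and the statistics are
 * handled in one place.
 *
 *     VBOXSTRICTRC rcStrict = iemExecOneInnerWorker(pVCpu);   // hypothetical worker
 *     return iemExecStatusCodeFiddling(pVCpu, rcStrict);
 */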
123
124
125/**
126 * Sets the pass up status.
127 *
128 * @returns VINF_SUCCESS.
129 * @param pVCpu The cross context virtual CPU structure of the
130 * calling thread.
131 * @param rcPassUp The pass up status. Must be informational.
132 * VINF_SUCCESS is not allowed.
133 */
134DECLINLINE(int) iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp) RT_NOEXCEPT
135{
136 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
137
138 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
139 if (rcOldPassUp == VINF_SUCCESS)
140 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
141 /* If both are EM scheduling codes, use EM priority rules. */
142 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
143 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
144 {
145 if (rcPassUp < rcOldPassUp)
146 {
147 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
148 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
149 }
150 else
151 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
152 }
153 /* Override EM scheduling with specific status code. */
154 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
155 {
156 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
157 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
158 }
159 /* Don't override specific status code, first come first served. */
160 else
161 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
162 return VINF_SUCCESS;
163}
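/*
 * Usage sketch (editorial, hedged): a worker that has to defer an informational
 * status (e.g. VINF_IOM_R3_MMIO_COMMIT_WRITE) while letting the current
 * instruction finish can record it and carry on; the deferred code is merged
 * back in later by iemExecStatusCodeFiddling().
 *
 *     if (rcStrict2 != VINF_SUCCESS)                        // informational only
 *         rcStrict2 = iemSetPassUpStatus(pVCpu, rcStrict2); // returns VINF_SUCCESS
 */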
164
165
166/**
167 * Calculates the CPU mode.
168 *
169 * This is mainly for updating IEMCPU::enmCpuMode.
170 *
171 * @returns CPU mode.
172 * @param pVCpu The cross context virtual CPU structure of the
173 * calling thread.
174 */
175DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPUCC pVCpu) RT_NOEXCEPT
176{
177 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
178 return IEMMODE_64BIT;
179 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
180 return IEMMODE_32BIT;
181 return IEMMODE_16BIT;
182}
183
184
185#ifdef VBOX_INCLUDED_vmm_dbgf_h /* dbgf.ro.cEnabledHwBreakpoints */
186/**
187 * Initializes the execution state.
188 *
189 * @param pVCpu The cross context virtual CPU structure of the
190 * calling thread.
191 * @param fBypassHandlers Whether to bypass access handlers.
192 *
193 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
194 * side-effects in strict builds.
195 */
196DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, bool fBypassHandlers) RT_NOEXCEPT
197{
198 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
199 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
200 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
201 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
202 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
205 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
206 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
207 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
208
209 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
210 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
211#ifdef VBOX_STRICT
212 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
213 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
214 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
215 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
216 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
217 pVCpu->iem.s.uRexReg = 127;
218 pVCpu->iem.s.uRexB = 127;
219 pVCpu->iem.s.offModRm = 127;
220 pVCpu->iem.s.uRexIndex = 127;
221 pVCpu->iem.s.iEffSeg = 127;
222 pVCpu->iem.s.idxPrefix = 127;
223 pVCpu->iem.s.uVex3rdReg = 127;
224 pVCpu->iem.s.uVexLength = 127;
225 pVCpu->iem.s.fEvexStuff = 127;
226 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
227# ifdef IEM_WITH_CODE_TLB
228 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
229 pVCpu->iem.s.pbInstrBuf = NULL;
230 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
231 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
232 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
233 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
234# else
235 pVCpu->iem.s.offOpcode = 127;
236 pVCpu->iem.s.cbOpcode = 127;
237# endif
238#endif
239
240 pVCpu->iem.s.cActiveMappings = 0;
241 pVCpu->iem.s.iNextMapping = 0;
242 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
243 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
244 pVCpu->iem.s.fDisregardLock = false;
245 pVCpu->iem.s.fPendingInstructionBreakpoints = false;
246 pVCpu->iem.s.fPendingDataBreakpoints = false;
247 pVCpu->iem.s.fPendingIoBreakpoints = false;
248 if (RT_LIKELY( !(pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)
249 && pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledHwBreakpoints == 0))
250 { /* likely */ }
251 else
252 iemInitPendingBreakpointsSlow(pVCpu);
253}
254#endif /* VBOX_INCLUDED_vmm_dbgf_h */
255
256
257#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
258/**
259 * Performs a minimal reinitialization of the execution state.
260 *
261 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
262 * 'world-switch' type operations on the CPU. Currently only nested
263 * hardware-virtualization uses it.
264 *
265 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
266 */
267DECLINLINE(void) iemReInitExec(PVMCPUCC pVCpu) RT_NOEXCEPT
268{
269 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
270 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
271
272 pVCpu->iem.s.uCpl = uCpl;
273 pVCpu->iem.s.enmCpuMode = enmMode;
274 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
275 pVCpu->iem.s.enmEffAddrMode = enmMode;
276 if (enmMode != IEMMODE_64BIT)
277 {
278 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
279 pVCpu->iem.s.enmEffOpSize = enmMode;
280 }
281 else
282 {
283 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
284 pVCpu->iem.s.enmEffOpSize = enmMode;
285 }
286 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
287#ifndef IEM_WITH_CODE_TLB
288 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
289 pVCpu->iem.s.offOpcode = 0;
290 pVCpu->iem.s.cbOpcode = 0;
291#endif
292 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
293}
294#endif
295
296/**
297 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
298 *
299 * @param pVCpu The cross context virtual CPU structure of the
300 * calling thread.
301 */
302DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu) RT_NOEXCEPT
303{
304 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
305#ifdef VBOX_STRICT
306# ifdef IEM_WITH_CODE_TLB
307 NOREF(pVCpu);
308# else
309 pVCpu->iem.s.cbOpcode = 0;
310# endif
311#else
312 NOREF(pVCpu);
313#endif
314}
315
316
317/**
318 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
319 *
320 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
321 *
322 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
324 * @param rcStrict The status code to fiddle.
325 */
326DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
327{
328 iemUninitExec(pVCpu);
329 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
330}
331
332
333/**
334 * Macro used by the IEMExec* methods to check the given instruction length.
335 *
336 * Will return on failure!
337 *
338 * @param a_cbInstr The given instruction length.
339 * @param a_cbMin The minimum length.
340 */
341#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
342 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
343 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
344
345
346#ifndef IEM_WITH_SETJMP
347
348/**
349 * Fetches the first opcode byte.
350 *
351 * @returns Strict VBox status code.
352 * @param pVCpu The cross context virtual CPU structure of the
353 * calling thread.
354 * @param pu8 Where to return the opcode byte.
355 */
356DECLINLINE(VBOXSTRICTRC) iemOpcodeGetFirstU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
357{
358 /*
359 * Check for hardware instruction breakpoints.
360 */
361 if (RT_LIKELY(!pVCpu->iem.s.fPendingInstructionBreakpoints))
362 { /* likely */ }
363 else
364 {
365 VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
366 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
367 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
368 { /* likely */ }
369 else if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
370 return iemRaiseDebugException(pVCpu);
371 else
372 return rcStrict;
373 }
374
375 /*
376 * Fetch the first opcode byte.
377 */
378 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
379 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
380 {
381 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
382 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
383 return VINF_SUCCESS;
384 }
385 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
386}
387
388#else /* IEM_WITH_SETJMP */
389
390/**
391 * Fetches the first opcode byte, longjmp on error.
392 *
393 * @returns The opcode byte.
394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
395 */
396DECL_INLINE_THROW(uint8_t) iemOpcodeGetFirstU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
397{
398 /*
399 * Check for hardware instruction breakpoints.
400 */
401 if (RT_LIKELY(!pVCpu->iem.s.fPendingInstructionBreakpoints))
402 { /* likely */ }
403 else
404 {
405 VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
406 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
407 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
408 { /* likely */ }
409 else
410 {
411 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
412 rcStrict = iemRaiseDebugException(pVCpu);
413 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
414 }
415 }
416
417 /*
418 * Fetch the first opcode byte.
419 */
420# ifdef IEM_WITH_CODE_TLB
421 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
422 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
423 if (RT_LIKELY( pbBuf != NULL
424 && offBuf < pVCpu->iem.s.cbInstrBuf))
425 {
426 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
427 return pbBuf[offBuf];
428 }
429# else
430 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
431 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
432 {
433 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
434 return pVCpu->iem.s.abOpcode[offOpcode];
435 }
436# endif
437 return iemOpcodeGetNextU8SlowJmp(pVCpu);
438}
439
440#endif /* IEM_WITH_SETJMP */
441
442/**
443 * Fetches the first opcode byte, returns/throws automatically on failure.
444 *
445 * @param a_pu8 Where to return the opcode byte.
446 * @remark Implicitly references pVCpu.
447 */
448#ifndef IEM_WITH_SETJMP
449# define IEM_OPCODE_GET_FIRST_U8(a_pu8) \
450 do \
451 { \
452 VBOXSTRICTRC rcStrict2 = iemOpcodeGetFirstU8(pVCpu, (a_pu8)); \
453 if (rcStrict2 == VINF_SUCCESS) \
454 { /* likely */ } \
455 else \
456 return rcStrict2; \
457 } while (0)
458#else
459# define IEM_OPCODE_GET_FIRST_U8(a_pu8) (*(a_pu8) = iemOpcodeGetFirstU8Jmp(pVCpu))
460#endif /* IEM_WITH_SETJMP */
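/*
 * Decoder usage sketch (editorial): both build flavours are used the same way.
 * The macro either assigns the fetched byte or bails out of the calling decoder
 * function (return of the strict status in the status-code build, longjmp in
 * the setjmp build).
 *
 *     uint8_t bOpcode;
 *     IEM_OPCODE_GET_FIRST_U8(&bOpcode);   // implicitly references pVCpu
 *     switch (bOpcode) { ... }
 */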
461
462
463#ifndef IEM_WITH_SETJMP
464
465/**
466 * Fetches the next opcode byte.
467 *
468 * @returns Strict VBox status code.
469 * @param pVCpu The cross context virtual CPU structure of the
470 * calling thread.
471 * @param pu8 Where to return the opcode byte.
472 */
473DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
474{
475 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
476 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
477 {
478 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
479 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
480 return VINF_SUCCESS;
481 }
482 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
483}
484
485#else /* IEM_WITH_SETJMP */
486
487/**
488 * Fetches the next opcode byte, longjmp on error.
489 *
490 * @returns The opcode byte.
491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
492 */
493DECL_INLINE_THROW(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
494{
495# ifdef IEM_WITH_CODE_TLB
496 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
497 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
498 if (RT_LIKELY( pbBuf != NULL
499 && offBuf < pVCpu->iem.s.cbInstrBuf))
500 {
501 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
502 return pbBuf[offBuf];
503 }
504# else
505 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
506 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
507 {
508 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
509 return pVCpu->iem.s.abOpcode[offOpcode];
510 }
511# endif
512 return iemOpcodeGetNextU8SlowJmp(pVCpu);
513}
514
515#endif /* IEM_WITH_SETJMP */
516
517/**
518 * Fetches the next opcode byte, returns automatically on failure.
519 *
520 * @param a_pu8 Where to return the opcode byte.
521 * @remark Implicitly references pVCpu.
522 */
523#ifndef IEM_WITH_SETJMP
524# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
525 do \
526 { \
527 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
528 if (rcStrict2 == VINF_SUCCESS) \
529 { /* likely */ } \
530 else \
531 return rcStrict2; \
532 } while (0)
533#else
534# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
535#endif /* IEM_WITH_SETJMP */
536
537
538#ifndef IEM_WITH_SETJMP
539/**
540 * Fetches the next signed byte from the opcode stream.
541 *
542 * @returns Strict VBox status code.
543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
544 * @param pi8 Where to return the signed byte.
545 */
546DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8) RT_NOEXCEPT
547{
548 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
549}
550#endif /* !IEM_WITH_SETJMP */
551
552
553/**
554 * Fetches the next signed byte from the opcode stream, returning automatically
555 * on failure.
556 *
557 * @param a_pi8 Where to return the signed byte.
558 * @remark Implicitly references pVCpu.
559 */
560#ifndef IEM_WITH_SETJMP
561# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
562 do \
563 { \
564 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
565 if (rcStrict2 != VINF_SUCCESS) \
566 return rcStrict2; \
567 } while (0)
568#else /* IEM_WITH_SETJMP */
569# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
570
571#endif /* IEM_WITH_SETJMP */
572
573
574#ifndef IEM_WITH_SETJMP
575/**
576 * Fetches the next signed byte from the opcode stream, extending it to
577 * unsigned 16-bit.
578 *
579 * @returns Strict VBox status code.
580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
581 * @param pu16 Where to return the unsigned word.
582 */
583DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
584{
585 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
586 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
587 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
588
589 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
590 pVCpu->iem.s.offOpcode = offOpcode + 1;
591 return VINF_SUCCESS;
592}
593#endif /* !IEM_WITH_SETJMP */
594
595/**
596 * Fetches the next signed byte from the opcode stream, sign-extending it to
597 * a word and returning automatically on failure.
598 *
599 * @param a_pu16 Where to return the word.
600 * @remark Implicitly references pVCpu.
601 */
602#ifndef IEM_WITH_SETJMP
603# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
604 do \
605 { \
606 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
607 if (rcStrict2 != VINF_SUCCESS) \
608 return rcStrict2; \
609 } while (0)
610#else
611# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
612#endif
613
614#ifndef IEM_WITH_SETJMP
615/**
616 * Fetches the next signed byte from the opcode stream, extending it to
617 * unsigned 32-bit.
618 *
619 * @returns Strict VBox status code.
620 * @param pVCpu The cross context virtual CPU structure of the calling thread.
621 * @param pu32 Where to return the unsigned dword.
622 */
623DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
624{
625 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
626 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
627 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
628
629 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
630 pVCpu->iem.s.offOpcode = offOpcode + 1;
631 return VINF_SUCCESS;
632}
633#endif /* !IEM_WITH_SETJMP */
634
635/**
636 * Fetches the next signed byte from the opcode stream, sign-extending it to
637 * a double word and returning automatically on failure.
638 *
639 * @param a_pu32 Where to return the double word.
640 * @remark Implicitly references pVCpu.
641 */
642#ifndef IEM_WITH_SETJMP
643# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
644 do \
645 { \
646 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
647 if (rcStrict2 != VINF_SUCCESS) \
648 return rcStrict2; \
649 } while (0)
650#else
651# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
652#endif
653
654
655#ifndef IEM_WITH_SETJMP
656/**
657 * Fetches the next signed byte from the opcode stream, extending it to
658 * unsigned 64-bit.
659 *
660 * @returns Strict VBox status code.
661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
662 * @param pu64 Where to return the unsigned qword.
663 */
664DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
665{
666 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
667 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
668 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
669
670 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
671 pVCpu->iem.s.offOpcode = offOpcode + 1;
672 return VINF_SUCCESS;
673}
674#endif /* !IEM_WITH_SETJMP */
675
676/**
677 * Fetches the next signed byte from the opcode stream, sign-extending it to
678 * a quad word and returning automatically on failure.
679 *
680 * @param a_pu64 Where to return the quad word.
681 * @remark Implicitly references pVCpu.
682 */
683#ifndef IEM_WITH_SETJMP
684# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
685 do \
686 { \
687 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
688 if (rcStrict2 != VINF_SUCCESS) \
689 return rcStrict2; \
690 } while (0)
691#else
692# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
693#endif
694
695
696#ifndef IEM_WITH_SETJMP
697/**
698 * Fetches the next opcode byte.
699 *
700 * @returns Strict VBox status code.
701 * @param pVCpu The cross context virtual CPU structure of the
702 * calling thread.
703 * @param pu8 Where to return the opcode byte.
704 */
705DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
706{
707 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
708 pVCpu->iem.s.offModRm = offOpcode;
709 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
710 {
711 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
712 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
713 return VINF_SUCCESS;
714 }
715 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
716}
717#else /* IEM_WITH_SETJMP */
718/**
719 * Fetches the next opcode byte, longjmp on error.
720 *
721 * @returns The opcode byte.
722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
723 */
724DECL_INLINE_THROW(uint8_t) iemOpcodeGetNextRmJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
725{
726# ifdef IEM_WITH_CODE_TLB
727 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
728 pVCpu->iem.s.offModRm = offBuf;
729 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
730 if (RT_LIKELY( pbBuf != NULL
731 && offBuf < pVCpu->iem.s.cbInstrBuf))
732 {
733 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
734 return pbBuf[offBuf];
735 }
736# else
737 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
738 pVCpu->iem.s.offModRm = offOpcode;
739 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
740 {
741 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
742 return pVCpu->iem.s.abOpcode[offOpcode];
743 }
744# endif
745 return iemOpcodeGetNextU8SlowJmp(pVCpu);
746}
747#endif /* IEM_WITH_SETJMP */
748
749/**
750 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
751 * on failure.
752 *
753 * Will note down the position of the ModR/M byte for VT-x exits.
754 *
755 * @param a_pbRm Where to return the RM opcode byte.
756 * @remark Implicitly references pVCpu.
757 */
758#ifndef IEM_WITH_SETJMP
759# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
760 do \
761 { \
762 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
763 if (rcStrict2 == VINF_SUCCESS) \
764 { /* likely */ } \
765 else \
766 return rcStrict2; \
767 } while (0)
768#else
769# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
770#endif /* IEM_WITH_SETJMP */
771
772
773#ifndef IEM_WITH_SETJMP
774
775/**
776 * Fetches the next opcode word.
777 *
778 * @returns Strict VBox status code.
779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
780 * @param pu16 Where to return the opcode word.
781 */
782DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
783{
784 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
785 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
786 {
787 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
788# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
789 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
790# else
791 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
792# endif
793 return VINF_SUCCESS;
794 }
795 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
796}
797
798#else /* IEM_WITH_SETJMP */
799
800/**
801 * Fetches the next opcode word, longjmp on error.
802 *
803 * @returns The opcode word.
804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
805 */
806DECL_INLINE_THROW(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
807{
808# ifdef IEM_WITH_CODE_TLB
809 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
810 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
811 if (RT_LIKELY( pbBuf != NULL
812 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
813 {
814 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
815# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
816 return *(uint16_t const *)&pbBuf[offBuf];
817# else
818 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
819# endif
820 }
821# else
822 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
823 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
824 {
825 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
826# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
827 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
828# else
829 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
830# endif
831 }
832# endif
833 return iemOpcodeGetNextU16SlowJmp(pVCpu);
834}
835
836#endif /* IEM_WITH_SETJMP */
837
838/**
839 * Fetches the next opcode word, returns automatically on failure.
840 *
841 * @param a_pu16 Where to return the opcode word.
842 * @remark Implicitly references pVCpu.
843 */
844#ifndef IEM_WITH_SETJMP
845# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
846 do \
847 { \
848 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
849 if (rcStrict2 != VINF_SUCCESS) \
850 return rcStrict2; \
851 } while (0)
852#else
853# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
854#endif
855
856#ifndef IEM_WITH_SETJMP
857/**
858 * Fetches the next opcode word, zero extending it to a double word.
859 *
860 * @returns Strict VBox status code.
861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
862 * @param pu32 Where to return the opcode double word.
863 */
864DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
865{
866 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
867 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
868 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
869
870 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
871 pVCpu->iem.s.offOpcode = offOpcode + 2;
872 return VINF_SUCCESS;
873}
874#endif /* !IEM_WITH_SETJMP */
875
876/**
877 * Fetches the next opcode word and zero extends it to a double word, returns
878 * automatically on failure.
879 *
880 * @param a_pu32 Where to return the opcode double word.
881 * @remark Implicitly references pVCpu.
882 */
883#ifndef IEM_WITH_SETJMP
884# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
885 do \
886 { \
887 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
888 if (rcStrict2 != VINF_SUCCESS) \
889 return rcStrict2; \
890 } while (0)
891#else
892# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
893#endif
894
895#ifndef IEM_WITH_SETJMP
896/**
897 * Fetches the next opcode word, zero extending it to a quad word.
898 *
899 * @returns Strict VBox status code.
900 * @param pVCpu The cross context virtual CPU structure of the calling thread.
901 * @param pu64 Where to return the opcode quad word.
902 */
903DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
904{
905 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
906 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
907 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
908
909 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
910 pVCpu->iem.s.offOpcode = offOpcode + 2;
911 return VINF_SUCCESS;
912}
913#endif /* !IEM_WITH_SETJMP */
914
915/**
916 * Fetches the next opcode word and zero extends it to a quad word, returns
917 * automatically on failure.
918 *
919 * @param a_pu64 Where to return the opcode quad word.
920 * @remark Implicitly references pVCpu.
921 */
922#ifndef IEM_WITH_SETJMP
923# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
924 do \
925 { \
926 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
927 if (rcStrict2 != VINF_SUCCESS) \
928 return rcStrict2; \
929 } while (0)
930#else
931# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
932#endif
933
934
935#ifndef IEM_WITH_SETJMP
936/**
937 * Fetches the next signed word from the opcode stream.
938 *
939 * @returns Strict VBox status code.
940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
941 * @param pi16 Where to return the signed word.
942 */
943DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16) RT_NOEXCEPT
944{
945 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
946}
947#endif /* !IEM_WITH_SETJMP */
948
949
950/**
951 * Fetches the next signed word from the opcode stream, returning automatically
952 * on failure.
953 *
954 * @param a_pi16 Where to return the signed word.
955 * @remark Implicitly references pVCpu.
956 */
957#ifndef IEM_WITH_SETJMP
958# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
959 do \
960 { \
961 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
962 if (rcStrict2 != VINF_SUCCESS) \
963 return rcStrict2; \
964 } while (0)
965#else
966# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
967#endif
968
969#ifndef IEM_WITH_SETJMP
970
971/**
972 * Fetches the next opcode dword.
973 *
974 * @returns Strict VBox status code.
975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
976 * @param pu32 Where to return the opcode double word.
977 */
978DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
979{
980 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
981 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
982 {
983 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
984# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
985 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
986# else
987 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
988 pVCpu->iem.s.abOpcode[offOpcode + 1],
989 pVCpu->iem.s.abOpcode[offOpcode + 2],
990 pVCpu->iem.s.abOpcode[offOpcode + 3]);
991# endif
992 return VINF_SUCCESS;
993 }
994 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
995}
996
997#else /* IEM_WITH_SETJMP */
998
999/**
1000 * Fetches the next opcode dword, longjmp on error.
1001 *
1002 * @returns The opcode dword.
1003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1004 */
1005DECL_INLINE_THROW(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1006{
1007# ifdef IEM_WITH_CODE_TLB
1008 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1009 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1010 if (RT_LIKELY( pbBuf != NULL
1011 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
1012 {
1013 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
1014# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1015 return *(uint32_t const *)&pbBuf[offBuf];
1016# else
1017 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
1018 pbBuf[offBuf + 1],
1019 pbBuf[offBuf + 2],
1020 pbBuf[offBuf + 3]);
1021# endif
1022 }
1023# else
1024 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1025 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
1026 {
1027 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
1028# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1029 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1030# else
1031 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1032 pVCpu->iem.s.abOpcode[offOpcode + 1],
1033 pVCpu->iem.s.abOpcode[offOpcode + 2],
1034 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1035# endif
1036 }
1037# endif
1038 return iemOpcodeGetNextU32SlowJmp(pVCpu);
1039}
1040
1041#endif /* IEM_WITH_SETJMP */
1042
1043/**
1044 * Fetches the next opcode dword, returns automatically on failure.
1045 *
1046 * @param a_pu32 Where to return the opcode dword.
1047 * @remark Implicitly references pVCpu.
1048 */
1049#ifndef IEM_WITH_SETJMP
1050# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1051 do \
1052 { \
1053 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
1054 if (rcStrict2 != VINF_SUCCESS) \
1055 return rcStrict2; \
1056 } while (0)
1057#else
1058# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
1059#endif
1060
1061#ifndef IEM_WITH_SETJMP
1062/**
1063 * Fetches the next opcode dword, zero extending it to a quad word.
1064 *
1065 * @returns Strict VBox status code.
1066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1067 * @param pu64 Where to return the opcode quad word.
1068 */
1069DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1070{
1071 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1072 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
1073 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
1074
1075 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1076 pVCpu->iem.s.abOpcode[offOpcode + 1],
1077 pVCpu->iem.s.abOpcode[offOpcode + 2],
1078 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1079 pVCpu->iem.s.offOpcode = offOpcode + 4;
1080 return VINF_SUCCESS;
1081}
1082#endif /* !IEM_WITH_SETJMP */
1083
1084/**
1085 * Fetches the next opcode dword and zero extends it to a quad word, returns
1086 * automatically on failure.
1087 *
1088 * @param a_pu64 Where to return the opcode quad word.
1089 * @remark Implicitly references pVCpu.
1090 */
1091#ifndef IEM_WITH_SETJMP
1092# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1093 do \
1094 { \
1095 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
1096 if (rcStrict2 != VINF_SUCCESS) \
1097 return rcStrict2; \
1098 } while (0)
1099#else
1100# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
1101#endif
1102
1103
1104#ifndef IEM_WITH_SETJMP
1105/**
1106 * Fetches the next signed double word from the opcode stream.
1107 *
1108 * @returns Strict VBox status code.
1109 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1110 * @param pi32 Where to return the signed double word.
1111 */
1112DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32) RT_NOEXCEPT
1113{
1114 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
1115}
1116#endif
1117
1118/**
1119 * Fetches the next signed double word from the opcode stream, returning
1120 * automatically on failure.
1121 *
1122 * @param a_pi32 Where to return the signed double word.
1123 * @remark Implicitly references pVCpu.
1124 */
1125#ifndef IEM_WITH_SETJMP
1126# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1127 do \
1128 { \
1129 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
1130 if (rcStrict2 != VINF_SUCCESS) \
1131 return rcStrict2; \
1132 } while (0)
1133#else
1134# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
1135#endif
1136
1137#ifndef IEM_WITH_SETJMP
1138/**
1139 * Fetches the next opcode dword, sign extending it into a quad word.
1140 *
1141 * @returns Strict VBox status code.
1142 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1143 * @param pu64 Where to return the opcode quad word.
1144 */
1145DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1146{
1147 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1148 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
1149 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
1150
1151 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1152 pVCpu->iem.s.abOpcode[offOpcode + 1],
1153 pVCpu->iem.s.abOpcode[offOpcode + 2],
1154 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1155 *pu64 = i32;
1156 pVCpu->iem.s.offOpcode = offOpcode + 4;
1157 return VINF_SUCCESS;
1158}
1159#endif /* !IEM_WITH_SETJMP */
1160
1161/**
1162 * Fetches the next opcode double word and sign extends it to a quad word,
1163 * returns automatically on failure.
1164 *
1165 * @param a_pu64 Where to return the opcode quad word.
1166 * @remark Implicitly references pVCpu.
1167 */
1168#ifndef IEM_WITH_SETJMP
1169# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1170 do \
1171 { \
1172 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
1173 if (rcStrict2 != VINF_SUCCESS) \
1174 return rcStrict2; \
1175 } while (0)
1176#else
1177# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
1178#endif
1179
1180#ifndef IEM_WITH_SETJMP
1181
1182/**
1183 * Fetches the next opcode qword.
1184 *
1185 * @returns Strict VBox status code.
1186 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1187 * @param pu64 Where to return the opcode qword.
1188 */
1189DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1190{
1191 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1192 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
1193 {
1194# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1195 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1196# else
1197 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1198 pVCpu->iem.s.abOpcode[offOpcode + 1],
1199 pVCpu->iem.s.abOpcode[offOpcode + 2],
1200 pVCpu->iem.s.abOpcode[offOpcode + 3],
1201 pVCpu->iem.s.abOpcode[offOpcode + 4],
1202 pVCpu->iem.s.abOpcode[offOpcode + 5],
1203 pVCpu->iem.s.abOpcode[offOpcode + 6],
1204 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1205# endif
1206 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
1207 return VINF_SUCCESS;
1208 }
1209 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
1210}
1211
1212#else /* IEM_WITH_SETJMP */
1213
1214/**
1215 * Fetches the next opcode qword, longjmp on error.
1216 *
1217 * @returns The opcode qword.
1218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1219 */
1220DECL_INLINE_THROW(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1221{
1222# ifdef IEM_WITH_CODE_TLB
1223 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1224 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1225 if (RT_LIKELY( pbBuf != NULL
1226 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
1227 {
1228 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
1229# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1230 return *(uint64_t const *)&pbBuf[offBuf];
1231# else
1232 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
1233 pbBuf[offBuf + 1],
1234 pbBuf[offBuf + 2],
1235 pbBuf[offBuf + 3],
1236 pbBuf[offBuf + 4],
1237 pbBuf[offBuf + 5],
1238 pbBuf[offBuf + 6],
1239 pbBuf[offBuf + 7]);
1240# endif
1241 }
1242# else
1243 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1244 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
1245 {
1246 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
1247# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1248 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1249# else
1250 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1251 pVCpu->iem.s.abOpcode[offOpcode + 1],
1252 pVCpu->iem.s.abOpcode[offOpcode + 2],
1253 pVCpu->iem.s.abOpcode[offOpcode + 3],
1254 pVCpu->iem.s.abOpcode[offOpcode + 4],
1255 pVCpu->iem.s.abOpcode[offOpcode + 5],
1256 pVCpu->iem.s.abOpcode[offOpcode + 6],
1257 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1258# endif
1259 }
1260# endif
1261 return iemOpcodeGetNextU64SlowJmp(pVCpu);
1262}
1263
1264#endif /* IEM_WITH_SETJMP */
1265
1266/**
1267 * Fetches the next opcode quad word, returns automatically on failure.
1268 *
1269 * @param a_pu64 Where to return the opcode quad word.
1270 * @remark Implicitly references pVCpu.
1271 */
1272#ifndef IEM_WITH_SETJMP
1273# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1274 do \
1275 { \
1276 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
1277 if (rcStrict2 != VINF_SUCCESS) \
1278 return rcStrict2; \
1279 } while (0)
1280#else
1281# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
1282#endif
1283
1284
1285/** @name Misc Worker Functions.
1286 * @{
1287 */
1288
1289/**
1290 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1291 * not (kind of obsolete now).
1292 *
1293 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1294 */
1295#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
1296
1297/**
1298 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
1299 *
1300 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1301 * @param a_fEfl The new EFLAGS.
1302 */
1303#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
1304
1305
1306/**
1307 * Loads a NULL data selector into a selector register, both the hidden and
1308 * visible parts, in protected mode.
1309 *
1310 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1311 * @param pSReg Pointer to the segment register.
1312 * @param uRpl The RPL.
1313 */
1314DECLINLINE(void) iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl) RT_NOEXCEPT
1315{
1316 /** @todo Testcase: write a testcase checking what happens when loading a NULL
1317 * data selector in protected mode. */
1318 pSReg->Sel = uRpl;
1319 pSReg->ValidSel = uRpl;
1320 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1321 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1322 {
1323 /* VT-x (Intel 3960x) observed doing something like this. */
1324 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
1325 pSReg->u32Limit = UINT32_MAX;
1326 pSReg->u64Base = 0;
1327 }
1328 else
1329 {
1330 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
1331 pSReg->u32Limit = 0;
1332 pSReg->u64Base = 0;
1333 }
1334}
1335
1336/** @} */
1337
1338
1339/*
1340 *
1341 * Helper routines.
1342 * Helper routines.
1343 * Helper routines.
1344 *
1345 */
1346
1347/**
1348 * Recalculates the effective operand size.
1349 *
1350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1351 */
1352DECLINLINE(void) iemRecalEffOpSize(PVMCPUCC pVCpu) RT_NOEXCEPT
1353{
1354 switch (pVCpu->iem.s.enmCpuMode)
1355 {
1356 case IEMMODE_16BIT:
1357 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
1358 break;
1359 case IEMMODE_32BIT:
1360 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
1361 break;
1362 case IEMMODE_64BIT:
1363 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
1364 {
1365 case 0:
1366 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
1367 break;
1368 case IEM_OP_PRF_SIZE_OP:
1369 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1370 break;
1371 case IEM_OP_PRF_SIZE_REX_W:
1372 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
1373 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1374 break;
1375 }
1376 break;
1377 default:
1378 AssertFailed();
1379 }
1380}
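/*
 * Worked example (editorial): in 32-bit code (enmCpuMode == IEMMODE_32BIT) an
 * operand-size prefix (66h) flips the effective size down to 16-bit, while in
 * 64-bit code REX.W forces 64-bit regardless of any 66h prefix.
 *
 *     pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_SIZE_OP;   // decoder saw a 66h prefix
 *     iemRecalEffOpSize(pVCpu);                       // 32-bit mode -> IEMMODE_16BIT
 */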
1381
1382
1383/**
1384 * Sets the default operand size to 64-bit and recalculates the effective
1385 * operand size.
1386 *
1387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1388 */
1389DECLINLINE(void) iemRecalEffOpSize64Default(PVMCPUCC pVCpu) RT_NOEXCEPT
1390{
1391 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
1392 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
1393 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
1394 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1395 else
1396 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1397}
1398
1399
1400/**
1401 * Sets the default operand size to 64-bit and recalculates the effective
1402 * operand size, with Intel ignoring any operand size prefix (AMD respects it).
1403 *
1404 * This is for the relative jumps.
1405 *
1406 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1407 */
1408DECLINLINE(void) iemRecalEffOpSize64DefaultAndIntelIgnoresOpSizePrefix(PVMCPUCC pVCpu) RT_NOEXCEPT
1409{
1410 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
1411 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
1412 if ( (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP
1413 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
1414 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1415 else
1416 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1417}
1418
1419
1420
1421
1422/** @name Register Access.
1423 * @{
1424 */
1425
1426/**
1427 * Gets a reference (pointer) to the specified hidden segment register.
1428 *
1429 * @returns Hidden register reference.
1430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1431 * @param iSegReg The segment register.
1432 */
1433DECLINLINE(PCPUMSELREG) iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1434{
1435 Assert(iSegReg < X86_SREG_COUNT);
1436 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1437 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1438
1439 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
1440 return pSReg;
1441}
1442
1443
1444/**
1445 * Ensures that the given hidden segment register is up to date.
1446 *
1447 * @returns Hidden register reference.
1448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1449 * @param pSReg The segment register.
1450 */
1451DECLINLINE(PCPUMSELREG) iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg) RT_NOEXCEPT
1452{
1453 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
1454 NOREF(pVCpu);
1455 return pSReg;
1456}
1457
1458
1459/**
1460 * Gets a reference (pointer) to the specified segment register (the selector
1461 * value).
1462 *
1463 * @returns Pointer to the selector variable.
1464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1465 * @param iSegReg The segment register.
1466 */
1467DECLINLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1468{
1469 Assert(iSegReg < X86_SREG_COUNT);
1470 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1471 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
1472}
1473
1474
1475/**
1476 * Fetches the selector value of a segment register.
1477 *
1478 * @returns The selector value.
1479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1480 * @param iSegReg The segment register.
1481 */
1482DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1483{
1484 Assert(iSegReg < X86_SREG_COUNT);
1485 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1486 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
1487}
1488
1489
1490/**
1491 * Fetches the base address value of a segment register.
1492 *
1493 * @returns The base address value.
1494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1495 * @param iSegReg The segment register.
1496 */
1497DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1498{
1499 Assert(iSegReg < X86_SREG_COUNT);
1500 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1501 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
1502}
1503
1504
1505/**
1506 * Gets a reference (pointer) to the specified general purpose register.
1507 *
1508 * @returns Register reference.
1509 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1510 * @param iReg The general purpose register.
1511 */
1512DECLINLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1513{
1514 Assert(iReg < 16);
1515 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
1516}
1517
1518
1519/**
1520 * Gets a reference (pointer) to the specified 8-bit general purpose register.
1521 *
1522 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
1523 *
1524 * @returns Register reference.
1525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1526 * @param iReg The register.
1527 */
1528DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1529{
1530 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
1531 {
1532 Assert(iReg < 16);
1533 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
1534 }
1535 /* high 8-bit register. */
1536 Assert(iReg < 8);
1537 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
1538}
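/*
 * Mapping example (editorial): without a REX prefix, encodings 4-7 select the
 * legacy high-byte registers, so iReg == 4 (AH) resolves to
 * &pVCpu->cpum.GstCtx.aGRegs[0].bHi (the high byte of RAX), whereas with any
 * REX prefix the same encoding resolves to &aGRegs[4].u8 (SPL).
 */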
1539
1540
1541/**
1542 * Gets a reference (pointer) to the specified 16-bit general purpose register.
1543 *
1544 * @returns Register reference.
1545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1546 * @param iReg The register.
1547 */
1548DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1549{
1550 Assert(iReg < 16);
1551 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
1552}
1553
1554
1555/**
1556 * Gets a reference (pointer) to the specified 32-bit general purpose register.
1557 *
1558 * @returns Register reference.
1559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1560 * @param iReg The register.
1561 */
1562DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1563{
1564 Assert(iReg < 16);
1565 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1566}
1567
1568
1569/**
1570 * Gets a reference (pointer) to the specified signed 32-bit general purpose register.
1571 *
1572 * @returns Register reference.
1573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1574 * @param iReg The register.
1575 */
1576DECLINLINE(int32_t *) iemGRegRefI32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1577{
1578 Assert(iReg < 16);
1579 return (int32_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1580}
1581
1582
1583/**
1584 * Gets a reference (pointer) to the specified 64-bit general purpose register.
1585 *
1586 * @returns Register reference.
1587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1588 * @param iReg The register.
1589 */
1590DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1591{
1592 Assert(iReg < 16);
1593 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1594}
1595
1596
1597/**
1598 * Gets a reference (pointer) to the specified signed 64-bit general purpose register.
1599 *
1600 * @returns Register reference.
1601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1602 * @param iReg The register.
1603 */
1604DECLINLINE(int64_t *) iemGRegRefI64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1605{
1606 Assert(iReg < 16);
1607 return (int64_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1608}
1609
1610
1611/**
1612 * Gets a reference (pointer) to the specified segment register's base address.
1613 *
1614 * @returns Segment register base address reference.
1615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1616 * @param iSegReg The segment selector.
1617 */
1618DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1619{
1620 Assert(iSegReg < X86_SREG_COUNT);
1621 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1622 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
1623}
1624
1625
1626/**
1627 * Fetches the value of an 8-bit general purpose register.
1628 *
1629 * @returns The register value.
1630 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1631 * @param iReg The register.
1632 */
1633DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1634{
1635 return *iemGRegRefU8(pVCpu, iReg);
1636}
1637
1638
1639/**
1640 * Fetches the value of a 16-bit general purpose register.
1641 *
1642 * @returns The register value.
1643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1644 * @param iReg The register.
1645 */
1646DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1647{
1648 Assert(iReg < 16);
1649 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
1650}
1651
1652
1653/**
1654 * Fetches the value of a 32-bit general purpose register.
1655 *
1656 * @returns The register value.
1657 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1658 * @param iReg The register.
1659 */
1660DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1661{
1662 Assert(iReg < 16);
1663 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1664}
1665
1666
1667/**
1668 * Fetches the value of a 64-bit general purpose register.
1669 *
1670 * @returns The register value.
1671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1672 * @param iReg The register.
1673 */
1674DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1675{
1676 Assert(iReg < 16);
1677 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1678}
1679
1680
1681/**
1682 * Get the address of the top of the stack.
1683 *
1684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1685 */
1686DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu) RT_NOEXCEPT
1687{
1688 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1689 return pVCpu->cpum.GstCtx.rsp;
1690 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1691 return pVCpu->cpum.GstCtx.esp;
1692 return pVCpu->cpum.GstCtx.sp;
1693}
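/*
 * Behaviour sketch (editorial): the effective stack pointer width follows the
 * CPU mode and the SS attributes, i.e.
 *     64-bit mode               -> full RSP
 *     32-bit stack (SS.D == 1)  -> ESP (zero-extended)
 *     16-bit stack (SS.D == 0)  -> SP  (zero-extended)
 */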
1694
1695
1696/**
1697 * Updates the RIP/EIP/IP to point to the next instruction.
1698 *
1699 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1700 * @param cbInstr The number of bytes to add.
1701 */
1702DECL_FORCE_INLINE(void) iemRegAddToRip(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
1703{
1704 /*
1705 * Advance RIP.
1706 *
1707 * When we're targeting 8086/8, 80186/8 or 80286 mode the updates are 16-bit,
1708 * while in all other modes except LM64 the updates are 32-bit. This means
1709 * we need to watch for both 32-bit and 16-bit "carry" situations, i.e.
1710 * 4GB and 64KB rollovers, and decide whether anything needs masking.
1711 *
1712 * See PC wrap around tests in bs3-cpu-weird-1.
1713 */
1714 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
1715 uint64_t const uRipNext = uRipPrev + cbInstr;
1716 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & (RT_BIT_64(32) | RT_BIT_64(16)))
1717 || pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT))
1718 pVCpu->cpum.GstCtx.rip = uRipNext;
1719 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
1720 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
1721 else
1722 pVCpu->cpum.GstCtx.rip = (uint16_t)uRipNext;
1723}
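
/* Illustrative sketch (not part of the IEM API, hypothetical names): a minimal,
 * self-contained version of the rollover test above using only standard types.
 * @code
 *  #include <stdint.h>
 *  #include <stdbool.h>
 *
 *  static bool ExampleRipCrossed16Or32BitBoundary(uint64_t uRipPrev, uint8_t cbInstr)
 *  {
 *      uint64_t const uRipNext = uRipPrev + cbInstr;
 *      // A toggle of bit 16 or bit 32 means a 64KB or 4GB boundary was crossed,
 *      // i.e. the only case where truncating to 16 or 32 bits changes the result.
 *      return ((uRipNext ^ uRipPrev) & ((UINT64_C(1) << 32) | (UINT64_C(1) << 16))) != 0;
 *  }
 *
 *  // E.g. uRipPrev=0xfffe, cbInstr=4: uRipNext=0x10002, bit 16 toggles, so a
 *  // pre-386 target ends up with (uint16_t)0x0002 in IP.
 * @endcode
 */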
1724
1725
1726/**
1727 * Called by iemRegAddToRipAndFinishingClearingRF and others when any of the
1728 * following EFLAGS bits are set:
1729 * - X86_EFL_RF - clear it.
1730 * - CPUMCTX_INHIBIT_SHADOW (_SS/_STI) - clear them.
1731 * - X86_EFL_TF - generate single step \#DB trap.
1732 * - CPUMCTX_DBG_HIT_DR0/1/2/3 - generate \#DB trap (data or I/O, not
1733 * instruction).
1734 *
1735 * According to @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events},
1736 * a \#DB due to TF (single stepping) or a DRx non-instruction breakpoint
1737 * takes priority over both NMIs and hardware interrupts. So, neither is
1738 * considered here. (The RESET, \#MC, SMI, INIT, STOPCLK and FLUSH events are
1739 * either unsupported or will be triggered on top of any \#DB raised here.)
1740 *
1741 * The RF flag only needs to be cleared here as it only suppresses instruction
1742 * breakpoints which are not raised here (happens synchronously during
1743 * instruction fetching).
1744 *
1745 * The CPUMCTX_INHIBIT_SHADOW_SS flag will be cleared by this function, so its
1746 * status has no bearing on whether \#DB exceptions are raised.
1747 *
1748 * @note This must *NOT* be called by the two instructions setting the
1749 * CPUMCTX_INHIBIT_SHADOW_SS flag.
1750 *
1751 * @see @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events}
1752 * @see @sdmv3{077,200,6.8.3,Masking Exceptions and Interrupts When Switching
1753 * Stacks}
1754 */
1755static VBOXSTRICTRC iemFinishInstructionWithFlagsSet(PVMCPUCC pVCpu) RT_NOEXCEPT
1756{
1757 /*
1758 * Normally we're just here to clear RF and/or interrupt shadow bits.
1759 */
1760 if (RT_LIKELY((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK)) == 0))
1761 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
1762 else
1763 {
1764#if 1
1765 /*
1766 * Raise a #DB.
1767 */
1768 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
1769 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
1770 if (pVCpu->cpum.GstCtx.eflags.uBoth & X86_EFL_TF)
1771 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS;
1772 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK) >> CPUMCTX_DBG_HIT_DRX_SHIFT;
1773 /** @todo Do we set all pending \#DB events, or just one? */
1774 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64\n",
1775 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
1776 pVCpu->cpum.GstCtx.rflags.uBoth));
1777 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK);
1778 return iemRaiseDebugException(pVCpu);
1779#else
1780 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK);
1781#endif
1782 }
1783 return VINF_SUCCESS;
1784}
1785
1786
1787/**
1788 * Clears the RF and CPUMCTX_INHIBIT_SHADOW, triggering \#DB if pending.
1789 *
1790 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1791 */
1792DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegFinishClearingRF(PVMCPUCC pVCpu) RT_NOEXCEPT
1793{
1794 /*
1795 * We assume that most of the time nothing actually needs doing here.
1796 */
1797 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
1798 if (RT_LIKELY(!( pVCpu->cpum.GstCtx.eflags.uBoth
1799 & (X86_EFL_TF | X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK)) ))
1800 return VINF_SUCCESS;
1801 return iemFinishInstructionWithFlagsSet(pVCpu);
1802}
1803
1804
1805/**
1806 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF
1807 * and CPUMCTX_INHIBIT_SHADOW.
1808 *
1809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1810 * @param cbInstr The number of bytes to add.
1811 */
1812DECLINLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
1813{
1814 iemRegAddToRip(pVCpu, cbInstr);
1815 return iemRegFinishClearingRF(pVCpu);
1816}
1817
1818
1819/**
1820 * Extended version of iemFinishInstructionWithFlagsSet that goes with
1821 * iemRegAddToRipAndFinishingClearingRfEx.
1822 *
1823 * See iemFinishInstructionWithFlagsSet() for details.
1824 */
1825static VBOXSTRICTRC iemFinishInstructionWithTfSet(PVMCPUCC pVCpu) RT_NOEXCEPT
1826{
1827 /*
1828 * Raise a #DB.
1829 */
1830 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
1831 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
1832 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS
1833 | (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK) >> CPUMCTX_DBG_HIT_DRX_SHIFT;
1834 /** @todo Do we set all pending \#DB events, or just one? */
1835 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64 (popf)\n",
1836 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
1837 pVCpu->cpum.GstCtx.rflags.uBoth));
1838 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK);
1839 return iemRaiseDebugException(pVCpu);
1840}
1841
1842
1843/**
1844 * Extended version of iemRegAddToRipAndFinishingClearingRF for use by POPF and
1845 * others potentially updating EFLAGS.TF.
1846 *
1847 * The single step event must be generated using the TF value at the start of
1848 * the instruction, not the new value set by it.
1849 *
1850 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1851 * @param cbInstr The number of bytes to add.
1852 * @param fEflOld The EFLAGS at the start of the instruction
1853 * execution.
1854 */
1855DECLINLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRfEx(PVMCPUCC pVCpu, uint8_t cbInstr, uint32_t fEflOld) RT_NOEXCEPT
1856{
1857 iemRegAddToRip(pVCpu, cbInstr);
1858 if (!(fEflOld & X86_EFL_TF))
1859 return iemRegFinishClearingRF(pVCpu);
1860 return iemFinishInstructionWithTfSet(pVCpu);
1861}
1862
1863
1864/**
1865 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
1866 *
1867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1868 */
1869DECLINLINE(VBOXSTRICTRC) iemRegUpdateRipAndFinishClearingRF(PVMCPUCC pVCpu) RT_NOEXCEPT
1870{
1871 return iemRegAddToRipAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
1872}
1873
1874
1875/**
1876 * Adds to the stack pointer.
1877 *
1878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1879 * @param cbToAdd The number of bytes to add (8-bit!).
1880 */
1881DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd) RT_NOEXCEPT
1882{
1883 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1884 pVCpu->cpum.GstCtx.rsp += cbToAdd;
1885 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1886 pVCpu->cpum.GstCtx.esp += cbToAdd;
1887 else
1888 pVCpu->cpum.GstCtx.sp += cbToAdd;
1889}
1890
1891
1892/**
1893 * Subtracts from the stack pointer.
1894 *
1895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1896 * @param cbToSub The number of bytes to subtract (8-bit!).
1897 */
1898DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub) RT_NOEXCEPT
1899{
1900 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1901 pVCpu->cpum.GstCtx.rsp -= cbToSub;
1902 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1903 pVCpu->cpum.GstCtx.esp -= cbToSub;
1904 else
1905 pVCpu->cpum.GstCtx.sp -= cbToSub;
1906}
1907
1908
1909/**
1910 * Adds to the temporary stack pointer.
1911 *
1912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1913 * @param pTmpRsp The temporary SP/ESP/RSP to update.
1914 * @param cbToAdd The number of bytes to add (16-bit).
1915 */
1916DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd) RT_NOEXCEPT
1917{
1918 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1919 pTmpRsp->u += cbToAdd;
1920 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1921 pTmpRsp->DWords.dw0 += cbToAdd;
1922 else
1923 pTmpRsp->Words.w0 += cbToAdd;
1924}
1925
1926
1927/**
1928 * Subtracts from the temporary stack pointer.
1929 *
1930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1931 * @param pTmpRsp The temporary SP/ESP/RSP to update.
1932 * @param cbToSub The number of bytes to subtract.
1933 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
1934 * expecting that.
1935 */
1936DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub) RT_NOEXCEPT
1937{
1938 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1939 pTmpRsp->u -= cbToSub;
1940 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1941 pTmpRsp->DWords.dw0 -= cbToSub;
1942 else
1943 pTmpRsp->Words.w0 -= cbToSub;
1944}
1945
1946
1947/**
1948 * Calculates the effective stack address for a push of the specified size as
1949 * well as the new RSP value (upper bits may be masked).
1950 *
1951 * @returns Effective stack address for the push.
1952 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1953 * @param cbItem The size of the stack item to push.
1954 * @param puNewRsp Where to return the new RSP value.
1955 */
1956DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
1957{
1958 RTUINT64U uTmpRsp;
1959 RTGCPTR GCPtrTop;
1960 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
1961
1962 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1963 GCPtrTop = uTmpRsp.u -= cbItem;
1964 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1965 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
1966 else
1967 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
1968 *puNewRsp = uTmpRsp.u;
1969 return GCPtrTop;
1970}
1971
1972
1973/**
1974 * Gets the current stack pointer and calculates the value after a pop of the
1975 * specified size.
1976 *
1977 * @returns Current stack pointer.
1978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1979 * @param cbItem The size of the stack item to pop.
1980 * @param puNewRsp Where to return the new RSP value.
1981 */
1982DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
1983{
1984 RTUINT64U uTmpRsp;
1985 RTGCPTR GCPtrTop;
1986 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
1987
1988 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1989 {
1990 GCPtrTop = uTmpRsp.u;
1991 uTmpRsp.u += cbItem;
1992 }
1993 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1994 {
1995 GCPtrTop = uTmpRsp.DWords.dw0;
1996 uTmpRsp.DWords.dw0 += cbItem;
1997 }
1998 else
1999 {
2000 GCPtrTop = uTmpRsp.Words.w0;
2001 uTmpRsp.Words.w0 += cbItem;
2002 }
2003 *puNewRsp = uTmpRsp.u;
2004 return GCPtrTop;
2005}
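
/* Illustrative sketch (hypothetical names, not part of the IEM API): how the push/pop
 * helpers above mask the stack pointer by the current stack width. Only the 16-bit
 * stack case (SS.D=0, not 64-bit mode) is shown; the 32-bit case is analogous with a
 * 4GB wrap.
 * @code
 *  #include <stdint.h>
 *
 *  static uint64_t ExamplePushRsp16(uint64_t uRsp, uint8_t cbItem, uint64_t *puEffAddr)
 *  {
 *      uint16_t uSp = (uint16_t)uRsp;
 *      uSp        -= cbItem;                               // wraps at 64KB: 0x0002 - 4 = 0xfffe
 *      *puEffAddr  = uSp;                                  // effective address of the new top item
 *      return (uRsp & UINT64_C(0xffffffffffff0000)) | uSp; // upper RSP bits stay untouched
 *  }
 * @endcode
 */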
2006
2007
2008/**
2009 * Calculates the effective stack address for a push of the specified size as
2010 * well as the new temporary RSP value (upper bits may be masked).
2011 *
2012 * @returns Effective stack address for the push.
2013 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2014 * @param pTmpRsp The temporary stack pointer. This is updated.
2015 * @param cbItem The size of the stack item to push.
2016 */
2017DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
2018{
2019 RTGCPTR GCPtrTop;
2020
2021 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2022 GCPtrTop = pTmpRsp->u -= cbItem;
2023 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2024 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
2025 else
2026 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
2027 return GCPtrTop;
2028}
2029
2030
2031/**
2032 * Gets the effective stack address for a pop of the specified size and
2033 * calculates and updates the temporary RSP.
2034 *
2035 * @returns Current stack pointer.
2036 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2037 * @param pTmpRsp The temporary stack pointer. This is updated.
2038 * @param cbItem The size of the stack item to pop.
2039 */
2040DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
2041{
2042 RTGCPTR GCPtrTop;
2043 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2044 {
2045 GCPtrTop = pTmpRsp->u;
2046 pTmpRsp->u += cbItem;
2047 }
2048 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2049 {
2050 GCPtrTop = pTmpRsp->DWords.dw0;
2051 pTmpRsp->DWords.dw0 += cbItem;
2052 }
2053 else
2054 {
2055 GCPtrTop = pTmpRsp->Words.w0;
2056 pTmpRsp->Words.w0 += cbItem;
2057 }
2058 return GCPtrTop;
2059}
2060
2061/** @} */
2062
2063
2064/** @name FPU access and helpers.
2065 *
2066 * @{
2067 */
2068
2069
2070/**
2071 * Hook for preparing to use the host FPU.
2072 *
2073 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2074 *
2075 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2076 */
2077DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu) RT_NOEXCEPT
2078{
2079#ifdef IN_RING3
2080 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2081#else
2082 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
2083#endif
2084 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2085}
2086
2087
2088/**
2089 * Hook for preparing to use the host FPU for SSE.
2090 *
2091 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2092 *
2093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2094 */
2095DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu) RT_NOEXCEPT
2096{
2097 iemFpuPrepareUsage(pVCpu);
2098}
2099
2100
2101/**
2102 * Hook for preparing to use the host FPU for AVX.
2103 *
2104 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2105 *
2106 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2107 */
2108DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu) RT_NOEXCEPT
2109{
2110 iemFpuPrepareUsage(pVCpu);
2111}
2112
2113
2114/**
2115 * Hook for actualizing the guest FPU state before the interpreter reads it.
2116 *
2117 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2118 *
2119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2120 */
2121DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2122{
2123#ifdef IN_RING3
2124 NOREF(pVCpu);
2125#else
2126 CPUMRZFpuStateActualizeForRead(pVCpu);
2127#endif
2128 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2129}
2130
2131
2132/**
2133 * Hook for actualizing the guest FPU state before the interpreter changes it.
2134 *
2135 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2136 *
2137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2138 */
2139DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2140{
2141#ifdef IN_RING3
2142 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2143#else
2144 CPUMRZFpuStateActualizeForChange(pVCpu);
2145#endif
2146 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2147}
2148
2149
2150/**
2151 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
2152 * only.
2153 *
2154 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2155 *
2156 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2157 */
2158DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2159{
2160#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
2161 NOREF(pVCpu);
2162#else
2163 CPUMRZFpuStateActualizeSseForRead(pVCpu);
2164#endif
2165 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2166}
2167
2168
2169/**
2170 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
2171 * read+write.
2172 *
2173 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2174 *
2175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2176 */
2177DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2178{
2179#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
2180 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2181#else
2182 CPUMRZFpuStateActualizeForChange(pVCpu);
2183#endif
2184 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2185
2186 /* Make sure any changes are loaded the next time around. */
2187 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_SSE;
2188}
2189
2190
2191/**
2192 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
2193 * only.
2194 *
2195 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2196 *
2197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2198 */
2199DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2200{
2201#ifdef IN_RING3
2202 NOREF(pVCpu);
2203#else
2204 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
2205#endif
2206 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2207}
2208
2209
2210/**
2211 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
2212 * read+write.
2213 *
2214 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2215 *
2216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2217 */
2218DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2219{
2220#ifdef IN_RING3
2221 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2222#else
2223 CPUMRZFpuStateActualizeForChange(pVCpu);
2224#endif
2225 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2226
2227 /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
2228 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
2229}
2230
2231
2232/**
2233 * Stores a QNaN value into a FPU register.
2234 *
2235 * @param pReg Pointer to the register.
2236 */
2237DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg) RT_NOEXCEPT
2238{
2239 pReg->au32[0] = UINT32_C(0x00000000);
2240 pReg->au32[1] = UINT32_C(0xc0000000);
2241 pReg->au16[4] = UINT16_C(0xffff);
2242}
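
/* Descriptive note (no behavioural claim beyond the stores above): the three writes
 * assemble the x87 "real indefinite" QNaN, i.e. sign=1, exponent=0x7fff and mantissa
 * 0xc000000000000000, giving the 80-bit pattern ffff:c0000000:00000000. */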
2243
2244
2245/**
2246 * Updates the FOP, FPU.CS and FPUIP registers.
2247 *
2248 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2249 * @param pFpuCtx The FPU context.
2250 */
2251DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
2252{
2253 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
2254 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
2255 /** @todo x87.CS and FPUIP need to be kept separately. */
2256 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2257 {
2258 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
2259 * happens in real mode here based on the fnsave and fnstenv images. */
2260 pFpuCtx->CS = 0;
2261 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
2262 }
2263 else if (!IEM_IS_LONG_MODE(pVCpu))
2264 {
2265 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
2266 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
2267 }
2268 else
2269 *(uint64_t *)&pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
2270}
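
/* Descriptive sketch (see the @todo above about verifying this on hardware): in the
 * real/V86 branch the far address is folded into one value as FPUIP = IP | (CS << 4),
 * e.g. CS=0x1234, IP=0x0010 gives FPUIP=0x12350, which is what the FNSAVE/FNSTENV
 * image layout is assumed to expect in real mode. */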
2271
2272
2273
2274
2275
2276/**
2277 * Marks the specified stack register as free (for FFREE).
2278 *
2279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2280 * @param iStReg The register to free.
2281 */
2282DECLINLINE(void) iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
2283{
2284 Assert(iStReg < 8);
2285 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2286 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
2287 pFpuCtx->FTW &= ~RT_BIT(iReg);
2288}
2289
2290
2291/**
2292 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
2293 *
2294 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2295 */
2296DECLINLINE(void) iemFpuStackIncTop(PVMCPUCC pVCpu) RT_NOEXCEPT
2297{
2298 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2299 uint16_t uFsw = pFpuCtx->FSW;
2300 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
2301 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
2302 uFsw &= ~X86_FSW_TOP_MASK;
2303 uFsw |= uTop;
2304 pFpuCtx->FSW = uFsw;
2305}
2306
2307
2308/**
2309 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
2310 *
2311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2312 */
2313DECLINLINE(void) iemFpuStackDecTop(PVMCPUCC pVCpu) RT_NOEXCEPT
2314{
2315 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2316 uint16_t uFsw = pFpuCtx->FSW;
2317 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
2318 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
2319 uFsw &= ~X86_FSW_TOP_MASK;
2320 uFsw |= uTop;
2321 pFpuCtx->FSW = uFsw;
2322}
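
/* Worked example of the TOP arithmetic above (illustrative only): TOP lives in FSW
 * bits 13:11; adding 1 pops and adding 7 (i.e. -1 modulo 8) pushes, with the sum
 * wrapped inside X86_FSW_TOP_MASK:
 *
 *      TOP = 0  --dec/push-->  7  --dec/push-->  6  --inc/pop-->  7  --inc/pop-->  0
 */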
2323
2324
2325
2326
2327DECLINLINE(int) iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
2328{
2329 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2330 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
2331 if (pFpuCtx->FTW & RT_BIT(iReg))
2332 return VINF_SUCCESS;
2333 return VERR_NOT_FOUND;
2334}
2335
2336
2337DECLINLINE(int) iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef) RT_NOEXCEPT
2338{
2339 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2340 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
2341 if (pFpuCtx->FTW & RT_BIT(iReg))
2342 {
2343 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
2344 return VINF_SUCCESS;
2345 }
2346 return VERR_NOT_FOUND;
2347}
2348
2349
2350DECLINLINE(int) iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
2351 uint8_t iStReg1, PCRTFLOAT80U *ppRef1) RT_NOEXCEPT
2352{
2353 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2354 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2355 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
2356 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
2357 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
2358 {
2359 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
2360 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
2361 return VINF_SUCCESS;
2362 }
2363 return VERR_NOT_FOUND;
2364}
2365
2366
2367DECLINLINE(int) iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1) RT_NOEXCEPT
2368{
2369 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2370 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2371 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
2372 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
2373 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
2374 {
2375 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
2376 return VINF_SUCCESS;
2377 }
2378 return VERR_NOT_FOUND;
2379}
2380
2381
2382/**
2383 * Rotates the stack registers when setting new TOS.
2384 *
2385 * @param pFpuCtx The FPU context.
2386 * @param iNewTop New TOS value.
2387 * @remarks We only do this to speed up fxsave/fxrstor which
2388 * arrange the FP registers in stack order.
2389 * MUST be done before writing the new TOS (FSW).
2390 */
2391DECLINLINE(void) iemFpuRotateStackSetTop(PX86FXSTATE pFpuCtx, uint16_t iNewTop) RT_NOEXCEPT
2392{
2393 uint16_t iOldTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2394 RTFLOAT80U ar80Temp[8];
2395
2396 if (iOldTop == iNewTop)
2397 return;
2398
2399 /* Unscrew the stack and get it into 'native' order. */
2400 ar80Temp[0] = pFpuCtx->aRegs[(8 - iOldTop + 0) & X86_FSW_TOP_SMASK].r80;
2401 ar80Temp[1] = pFpuCtx->aRegs[(8 - iOldTop + 1) & X86_FSW_TOP_SMASK].r80;
2402 ar80Temp[2] = pFpuCtx->aRegs[(8 - iOldTop + 2) & X86_FSW_TOP_SMASK].r80;
2403 ar80Temp[3] = pFpuCtx->aRegs[(8 - iOldTop + 3) & X86_FSW_TOP_SMASK].r80;
2404 ar80Temp[4] = pFpuCtx->aRegs[(8 - iOldTop + 4) & X86_FSW_TOP_SMASK].r80;
2405 ar80Temp[5] = pFpuCtx->aRegs[(8 - iOldTop + 5) & X86_FSW_TOP_SMASK].r80;
2406 ar80Temp[6] = pFpuCtx->aRegs[(8 - iOldTop + 6) & X86_FSW_TOP_SMASK].r80;
2407 ar80Temp[7] = pFpuCtx->aRegs[(8 - iOldTop + 7) & X86_FSW_TOP_SMASK].r80;
2408
2409 /* Now rotate the stack to the new position. */
2410 pFpuCtx->aRegs[0].r80 = ar80Temp[(iNewTop + 0) & X86_FSW_TOP_SMASK];
2411 pFpuCtx->aRegs[1].r80 = ar80Temp[(iNewTop + 1) & X86_FSW_TOP_SMASK];
2412 pFpuCtx->aRegs[2].r80 = ar80Temp[(iNewTop + 2) & X86_FSW_TOP_SMASK];
2413 pFpuCtx->aRegs[3].r80 = ar80Temp[(iNewTop + 3) & X86_FSW_TOP_SMASK];
2414 pFpuCtx->aRegs[4].r80 = ar80Temp[(iNewTop + 4) & X86_FSW_TOP_SMASK];
2415 pFpuCtx->aRegs[5].r80 = ar80Temp[(iNewTop + 5) & X86_FSW_TOP_SMASK];
2416 pFpuCtx->aRegs[6].r80 = ar80Temp[(iNewTop + 6) & X86_FSW_TOP_SMASK];
2417 pFpuCtx->aRegs[7].r80 = ar80Temp[(iNewTop + 7) & X86_FSW_TOP_SMASK];
2418}
2419
2420
2421/**
2422 * Updates the FPU exception status after FCW is changed.
2423 *
2424 * @param pFpuCtx The FPU context.
2425 */
2426DECLINLINE(void) iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
2427{
2428 uint16_t u16Fsw = pFpuCtx->FSW;
2429 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
2430 u16Fsw |= X86_FSW_ES | X86_FSW_B;
2431 else
2432 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
2433 pFpuCtx->FSW = u16Fsw;
2434}
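
/* Concrete illustration of the recalculation above (sketch only): with FCW=0x037e
 * (the default control word but with IM, bit 0, cleared) and FSW.IE=1, the pending
 * invalid-operation exception is unmasked, so ES and B get set; restoring FCW=0x037f
 * masks it again and the next recalculation clears ES and B. */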
2435
2436
2437/**
2438 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
2439 *
2440 * @returns The full FTW.
2441 * @param pFpuCtx The FPU context.
2442 */
2443DECLINLINE(uint16_t) iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx) RT_NOEXCEPT
2444{
2445 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
2446 uint16_t u16Ftw = 0;
2447 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2448 for (unsigned iSt = 0; iSt < 8; iSt++)
2449 {
2450 unsigned const iReg = (iSt + iTop) & 7;
2451 if (!(u8Ftw & RT_BIT(iReg)))
2452 u16Ftw |= 3 << (iReg * 2); /* empty */
2453 else
2454 {
2455 uint16_t uTag;
2456 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
2457 if (pr80Reg->s.uExponent == 0x7fff)
2458 uTag = 2; /* Exponent is all 1's => Special. */
2459 else if (pr80Reg->s.uExponent == 0x0000)
2460 {
2461 if (pr80Reg->s.uMantissa == 0x0000)
2462 uTag = 1; /* All bits are zero => Zero. */
2463 else
2464 uTag = 2; /* Must be special. */
2465 }
2466 else if (pr80Reg->s.uMantissa & RT_BIT_64(63)) /* The J bit. */
2467 uTag = 0; /* Valid. */
2468 else
2469 uTag = 2; /* Must be special. */
2470
2471 u16Ftw |= uTag << (iReg * 2);
2472 }
2473 }
2474
2475 return u16Ftw;
2476}
2477
2478
2479/**
2480 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
2481 *
2482 * @returns The compressed FTW.
2483 * @param u16FullFtw The full FTW to convert.
2484 */
2485DECLINLINE(uint16_t) iemFpuCompressFtw(uint16_t u16FullFtw) RT_NOEXCEPT
2486{
2487 uint8_t u8Ftw = 0;
2488 for (unsigned i = 0; i < 8; i++)
2489 {
2490 if ((u16FullFtw & 3) != 3 /*empty*/)
2491 u8Ftw |= RT_BIT(i);
2492 u16FullFtw >>= 2;
2493 }
2494
2495 return u8Ftw;
2496}
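
/* Illustrative round trip of the two FTW conversions above, using only standard types
 * (hypothetical names and values, not part of the IEM API).
 * @code
 *  #include <stdint.h>
 *  #include <assert.h>
 *
 *  static uint8_t ExampleCompressFtw(uint16_t u16FullFtw)
 *  {
 *      uint8_t u8Ftw = 0;
 *      for (unsigned i = 0; i < 8; i++, u16FullFtw >>= 2)
 *          if ((u16FullFtw & 3) != 3) // 11b = empty
 *              u8Ftw |= (uint8_t)(1 << i);
 *      return u8Ftw;
 *  }
 *
 *  int main(void)
 *  {
 *      // Full tags: reg0=valid(00), reg1=zero(01), reg2=special(10), reg3..7=empty(11)
 *      // => 0xffe4. The compressed form only keeps empty vs non-empty => bits 0..2 => 0x07.
 *      assert(ExampleCompressFtw(UINT16_C(0xffe4)) == 0x07);
 *      return 0;
 *  }
 * @endcode
 */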
2497
2498/** @} */
2499
2500
2501/** @name Memory access.
2502 *
2503 * @{
2504 */
2505
2506
2507/**
2508 * Checks whether alignment checks are enabled or not.
2509 *
2510 * @returns true if enabled, false if not.
2511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2512 */
2513DECLINLINE(bool) iemMemAreAlignmentChecksEnabled(PVMCPUCC pVCpu) RT_NOEXCEPT
2514{
2515 AssertCompile(X86_CR0_AM == X86_EFL_AC);
2516 return pVCpu->iem.s.uCpl == 3
2517 && (((uint32_t)pVCpu->cpum.GstCtx.cr0 & pVCpu->cpum.GstCtx.eflags.u) & X86_CR0_AM);
2518}
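
/* The AssertCompile above is what makes the single AND sufficient: CR0.AM and
 * EFLAGS.AC are both bit 18, so (CR0 & EFLAGS) & X86_CR0_AM is non-zero exactly when
 * both are set. A standalone sketch with hypothetical names:
 * @code
 *  #include <stdint.h>
 *  #include <stdbool.h>
 *
 *  #define EXAMPLE_BIT18 UINT32_C(0x00040000) // CR0.AM and EFLAGS.AC are both bit 18
 *
 *  static bool ExampleAlignmentChecksEnabled(uint8_t uCpl, uint32_t uCr0, uint32_t fEfl)
 *  {
 *      return uCpl == 3 && ((uCr0 & fEfl) & EXAMPLE_BIT18) != 0;
 *  }
 * @endcode
 */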
2519
2520/**
2521 * Checks if the given segment can be written to, raising the appropriate
2522 * exception if not.
2523 *
2524 * @returns VBox strict status code.
2525 *
2526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2527 * @param pHid Pointer to the hidden register.
2528 * @param iSegReg The register number.
2529 * @param pu64BaseAddr Where to return the base address to use for the
2530 * segment. (In 64-bit code it may differ from the
2531 * base in the hidden segment.)
2532 */
2533DECLINLINE(VBOXSTRICTRC) iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
2534 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
2535{
2536 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2537
2538 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2539 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
2540 else
2541 {
2542 if (!pHid->Attr.n.u1Present)
2543 {
2544 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
2545 AssertRelease(uSel == 0);
2546 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
2547 return iemRaiseGeneralProtectionFault0(pVCpu);
2548 }
2549
2550 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
2551 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
2552 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
2553 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
2554 *pu64BaseAddr = pHid->u64Base;
2555 }
2556 return VINF_SUCCESS;
2557}
2558
2559
2560/**
2561 * Checks if the given segment can be read from, raising the appropriate
2562 * exception if not.
2563 *
2564 * @returns VBox strict status code.
2565 *
2566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2567 * @param pHid Pointer to the hidden register.
2568 * @param iSegReg The register number.
2569 * @param pu64BaseAddr Where to return the base address to use for the
2570 * segment. (In 64-bit code it may differ from the
2571 * base in the hidden segment.)
2572 */
2573DECLINLINE(VBOXSTRICTRC) iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
2574 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
2575{
2576 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2577
2578 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2579 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
2580 else
2581 {
2582 if (!pHid->Attr.n.u1Present)
2583 {
2584 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
2585 AssertRelease(uSel == 0);
2586 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
2587 return iemRaiseGeneralProtectionFault0(pVCpu);
2588 }
2589
2590 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2591 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
2592 *pu64BaseAddr = pHid->u64Base;
2593 }
2594 return VINF_SUCCESS;
2595}
2596
2597
2598/**
2599 * Maps a physical page.
2600 *
2601 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
2602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2603 * @param GCPhysMem The physical address.
2604 * @param fAccess The intended access.
2605 * @param ppvMem Where to return the mapping address.
2606 * @param pLock The PGM lock.
2607 */
2608DECLINLINE(int) iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
2609 void **ppvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
2610{
2611#ifdef IEM_LOG_MEMORY_WRITES
2612 if (fAccess & IEM_ACCESS_TYPE_WRITE)
2613 return VERR_PGM_PHYS_TLB_CATCH_ALL;
2614#endif
2615
2616 /** @todo This API may require some improving later. A private deal with PGM
2617 * regarding locking and unlocking needs to be struck. A couple of TLBs
2618 * living in PGM, but with publicly accessible inlined access methods
2619 * could perhaps be an even better solution. */
2620 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
2621 GCPhysMem,
2622 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
2623 pVCpu->iem.s.fBypassHandlers,
2624 ppvMem,
2625 pLock);
2626 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
2627 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
2628
2629 return rc;
2630}
2631
2632
2633/**
2634 * Unmap a page previously mapped by iemMemPageMap.
2635 *
2636 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2637 * @param GCPhysMem The physical address.
2638 * @param fAccess The intended access.
2639 * @param pvMem What iemMemPageMap returned.
2640 * @param pLock The PGM lock.
2641 */
2642DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
2643 const void *pvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
2644{
2645 NOREF(pVCpu);
2646 NOREF(GCPhysMem);
2647 NOREF(fAccess);
2648 NOREF(pvMem);
2649 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
2650}
2651
2652#ifdef IEM_WITH_SETJMP
2653
2654/** @todo slim this down */
2655DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg,
2656 size_t cbMem, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
2657{
2658 Assert(cbMem >= 1);
2659 Assert(iSegReg < X86_SREG_COUNT);
2660
2661 /*
2662 * 64-bit mode is simpler.
2663 */
2664 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2665 {
2666 if (iSegReg >= X86_SREG_FS && iSegReg != UINT8_MAX)
2667 {
2668 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2669 PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
2670 GCPtrMem += pSel->u64Base;
2671 }
2672
2673 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
2674 return GCPtrMem;
2675 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
2676 }
2677 /*
2678 * 16-bit and 32-bit segmentation.
2679 */
2680 else if (iSegReg != UINT8_MAX)
2681 {
2682 /** @todo Does this apply to segments with 4G-1 limit? */
2683 uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
2684 if (RT_LIKELY(GCPtrLast32 >= (uint32_t)GCPtrMem))
2685 {
2686 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2687 PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
2688 switch (pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
2689 | X86_SEL_TYPE_READ | X86_SEL_TYPE_WRITE /* same as read */
2690 | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_CONF /* same as down */
2691 | X86_SEL_TYPE_CODE))
2692 {
2693 case X86DESCATTR_P: /* readonly data, expand up */
2694 case X86DESCATTR_P | X86_SEL_TYPE_WRITE: /* writable data, expand up */
2695 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ: /* code, read-only */
2696 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_CONF: /* conforming code, read-only */
2697 /* expand up */
2698 if (RT_LIKELY(GCPtrLast32 <= pSel->u32Limit))
2699 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
2700 Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x vs %#x\n",
2701 (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit));
2702 break;
2703
2704 case X86DESCATTR_P | X86_SEL_TYPE_DOWN: /* readonly data, expand down */
2705 case X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_WRITE: /* writable data, expand down */
2706 /* expand down */
2707 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
2708 && ( pSel->Attr.n.u1DefBig
2709 || GCPtrLast32 <= UINT32_C(0xffff)) ))
2710 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
2711 Log10(("iemMemApplySegmentToReadJmp: expand down out of bounds %#x..%#x vs %#x..%#x\n",
2712 (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit, pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT16_MAX));
2713 break;
2714
2715 default:
2716 Log10(("iemMemApplySegmentToReadJmp: bad selector %#x\n", pSel->Attr.u));
2717 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
2718 break;
2719 }
2720 }
2721 Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x\n",(uint32_t)GCPtrMem, GCPtrLast32));
2722 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
2723 }
2724 /*
2725 * 32-bit flat address.
2726 */
2727 else
2728 return GCPtrMem;
2729}
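
/* Compact sketch of the expand-down bounds test in the 16/32-bit path above
 * (hypothetical names; the earlier 32-bit wrap check and all faulting logic are
 * omitted). For expand-down data segments the valid offsets lie above the limit and
 * at or below 0xffffffff (D=1) or 0xffff (D=0).
 * @code
 *  #include <stdint.h>
 *  #include <stdbool.h>
 *
 *  static bool ExampleExpandDownInBounds(uint32_t offFirst, uint32_t cbMem,
 *                                        uint32_t uLimit, bool fDefBig)
 *  {
 *      uint32_t const offLast = offFirst + cbMem - 1;
 *      return offFirst > uLimit
 *          && (fDefBig || offLast <= UINT32_C(0xffff));
 *  }
 * @endcode
 */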
2730
2731
2732/** @todo slim this down */
2733DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem,
2734 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
2735{
2736 Assert(cbMem >= 1);
2737 Assert(iSegReg < X86_SREG_COUNT);
2738
2739 /*
2740 * 64-bit mode is simpler.
2741 */
2742 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2743 {
2744 if (iSegReg >= X86_SREG_FS)
2745 {
2746 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2747 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
2748 GCPtrMem += pSel->u64Base;
2749 }
2750
2751 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
2752 return GCPtrMem;
2753 }
2754 /*
2755 * 16-bit and 32-bit segmentation.
2756 */
2757 else
2758 {
2759 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2760 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
2761 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
2762 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
2763 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
2764 {
2765 /* expand up */
2766 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
2767 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
2768 && GCPtrLast32 > (uint32_t)GCPtrMem))
2769 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
2770 }
2771 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
2772 {
2773 /* expand down */
2774 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
2775 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
2776 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2777 && GCPtrLast32 > (uint32_t)GCPtrMem))
2778 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
2779 }
2780 else
2781 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
2782 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
2783 }
2784 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
2785}
2786
2787#endif /* IEM_WITH_SETJMP */
2788
2789/**
2790 * Fakes a long mode stack selector for SS = 0.
2791 *
2792 * @param pDescSs Where to return the fake stack descriptor.
2793 * @param uDpl The DPL we want.
2794 */
2795DECLINLINE(void) iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl) RT_NOEXCEPT
2796{
2797 pDescSs->Long.au64[0] = 0;
2798 pDescSs->Long.au64[1] = 0;
2799 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2800 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
2801 pDescSs->Long.Gen.u2Dpl = uDpl;
2802 pDescSs->Long.Gen.u1Present = 1;
2803 pDescSs->Long.Gen.u1Long = 1;
2804}
2805
2806/** @} */
2807
2808
2809#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2810
2811/**
2812 * Gets CR0 fixed-0 bits in VMX non-root mode.
2813 *
2814 * We do this rather than fetching what we report to the guest (in
2815 * IA32_VMX_CR0_FIXED0 MSR) because real hardware (and we do likewise) reports the same
2816 * values regardless of whether the unrestricted-guest feature is available on the CPU.
2817 *
2818 * @returns CR0 fixed-0 bits.
2819 * @param pVCpu The cross context virtual CPU structure.
2820 */
2821DECLINLINE(uint64_t) iemVmxGetCr0Fixed0(PCVMCPUCC pVCpu) RT_NOEXCEPT
2822{
2823 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
2824 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
2825
2826 static uint64_t const s_auCr0Fixed0[2] = { VMX_V_CR0_FIXED0, VMX_V_CR0_FIXED0_UX };
2827 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
2828 uint8_t const fUnrestrictedGuest = !!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
2829 uint64_t const uCr0Fixed0 = s_auCr0Fixed0[fUnrestrictedGuest];
2830 Assert(!(uCr0Fixed0 & (X86_CR0_NW | X86_CR0_CD)));
2831 return uCr0Fixed0;
2832}
2833
2834
2835/**
2836 * Sets virtual-APIC write emulation as pending.
2837 *
2838 * @param pVCpu The cross context virtual CPU structure.
2839 * @param offApic The offset in the virtual-APIC page that was written.
2840 */
2841DECLINLINE(void) iemVmxVirtApicSetPendingWrite(PVMCPUCC pVCpu, uint16_t offApic) RT_NOEXCEPT
2842{
2843 Assert(offApic < XAPIC_OFF_END + 4);
2844
2845 /*
2846 * Record the currently updated APIC offset, as we need this later for figuring
2847 * out whether to perform TPR, EOI or self-IPI virtualization as well
2848 * as for supplying the exit qualification when causing an APIC-write VM-exit.
2849 */
2850 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offApic;
2851
2852 /*
2853 * Flag that we need to perform virtual-APIC write emulation (TPR/PPR/EOI/Self-IPI
2854 * virtualization or APIC-write emulation).
2855 */
2856 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
2857 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
2858}
2859
2860#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
2861
2862#endif /* !VMM_INCLUDED_SRC_include_IEMInline_h */