VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInline.h@97714

Last change on this file since 97714 was 97714, checked in by vboxsync, 2 years ago

VMM/IEM: Comments and preprocessor indent adjustments. bugref:9898

1/* $Id: IEMInline.h 97714 2022-11-30 08:31:23Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Inlined Functions.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMInline_h
29#define VMM_INCLUDED_SRC_include_IEMInline_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34
35
36/**
37 * Makes status code adjustments (pass up from I/O and access handler)
38 * as well as maintaining statistics.
39 *
40 * @returns Strict VBox status code to pass up.
41 * @param pVCpu The cross context virtual CPU structure of the calling thread.
42 * @param rcStrict The status from executing an instruction.
43 */
44DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
45{
46 if (rcStrict != VINF_SUCCESS)
47 {
48 if (RT_SUCCESS(rcStrict))
49 {
50 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
51 || rcStrict == VINF_IOM_R3_IOPORT_READ
52 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
53 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
54 || rcStrict == VINF_IOM_R3_MMIO_READ
55 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
56 || rcStrict == VINF_IOM_R3_MMIO_WRITE
57 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
58 || rcStrict == VINF_CPUM_R3_MSR_READ
59 || rcStrict == VINF_CPUM_R3_MSR_WRITE
60 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
61 || rcStrict == VINF_EM_RAW_TO_R3
62 || rcStrict == VINF_EM_TRIPLE_FAULT
63 || rcStrict == VINF_GIM_R3_HYPERCALL
64 /* raw-mode / virt handlers only: */
65 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
66 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
67 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
68 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
69 || rcStrict == VINF_SELM_SYNC_GDT
70 || rcStrict == VINF_CSAM_PENDING_ACTION
71 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
72 /* nested hw.virt codes: */
73 || rcStrict == VINF_VMX_VMEXIT
74 || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
75 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
76 || rcStrict == VINF_SVM_VMEXIT
77 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
78/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
79 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
80#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
81 if ( rcStrict == VINF_VMX_VMEXIT
82 && rcPassUp == VINF_SUCCESS)
83 rcStrict = VINF_SUCCESS;
84 else
85#endif
86#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
87 if ( rcStrict == VINF_SVM_VMEXIT
88 && rcPassUp == VINF_SUCCESS)
89 rcStrict = VINF_SUCCESS;
90 else
91#endif
92 if (rcPassUp == VINF_SUCCESS)
93 pVCpu->iem.s.cRetInfStatuses++;
94 else if ( rcPassUp < VINF_EM_FIRST
95 || rcPassUp > VINF_EM_LAST
96 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
97 {
98 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
99 pVCpu->iem.s.cRetPassUpStatus++;
100 rcStrict = rcPassUp;
101 }
102 else
103 {
104 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
105 pVCpu->iem.s.cRetInfStatuses++;
106 }
107 }
108 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
109 pVCpu->iem.s.cRetAspectNotImplemented++;
110 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
111 pVCpu->iem.s.cRetInstrNotImplemented++;
112 else
113 pVCpu->iem.s.cRetErrStatuses++;
114 }
115 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
116 {
117 pVCpu->iem.s.cRetPassUpStatus++;
118 rcStrict = pVCpu->iem.s.rcPassUp;
119 }
120
121 return rcStrict;
122}
123
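/*
 * Example: a small sketch of how the fiddling above folds in a pending pass-up
 * status (illustrative only, not part of the original file; assumes the usual
 * VBox numbering where VINF_EM_* codes rank as more urgent than VINF_IOM_*):
 *
 *     //  rcStrict              = VINF_IOM_R3_IOPORT_READ   (instruction needs ring-3 I/O)
 *     //  pVCpu->iem.s.rcPassUp = VINF_EM_RAW_TO_R3         (recorded earlier via iemSetPassUpStatus)
 *     rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
 *     //  => rcStrict is now VINF_EM_RAW_TO_R3, since the pass-up code takes precedence
 *     //     over the instruction status, and cRetPassUpStatus was incremented.
 */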
124
125/**
126 * Sets the pass up status.
127 *
128 * @returns VINF_SUCCESS.
129 * @param pVCpu The cross context virtual CPU structure of the
130 * calling thread.
131 * @param rcPassUp The pass up status. Must be informational.
132 * VINF_SUCCESS is not allowed.
133 */
134DECLINLINE(int) iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp) RT_NOEXCEPT
135{
136 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
137
138 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
139 if (rcOldPassUp == VINF_SUCCESS)
140 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
141 /* If both are EM scheduling codes, use EM priority rules. */
142 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
143 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
144 {
145 if (rcPassUp < rcOldPassUp)
146 {
147 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
148 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
149 }
150 else
151 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
152 }
153 /* Override EM scheduling with specific status code. */
154 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
155 {
156 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
157 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
158 }
159 /* Don't override specific status code, first come first served. */
160 else
161 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
162 return VINF_SUCCESS;
163}
164
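/*
 * Example: how the priority rules above resolve repeated calls (illustrative
 * only, not part of the original file):
 *
 *     iemSetPassUpStatus(pVCpu, rc1);   // rcPassUp was VINF_SUCCESS, so rc1 is recorded.
 *     iemSetPassUpStatus(pVCpu, rc2);   // If rc1 and rc2 are both VINF_EM_* scheduling codes,
 *                                       // the numerically smaller (higher priority) one is kept.
 *                                       // If rc1 is a specific (non-EM) code it stays put -
 *                                       // first come, first served - and rc2 is only logged.
 */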
165
166/**
167 * Calculates the CPU mode.
168 *
169 * This is mainly for updating IEMCPU::enmCpuMode.
170 *
171 * @returns CPU mode.
172 * @param pVCpu The cross context virtual CPU structure of the
173 * calling thread.
174 */
175DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPUCC pVCpu) RT_NOEXCEPT
176{
177 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
178 return IEMMODE_64BIT;
179 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
180 return IEMMODE_32BIT;
181 return IEMMODE_16BIT;
182}
183
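/*
 * Example: the resulting modes for common guest states (illustrative; the
 * 64-bit check is roughly "EFER.LMA and CS.L both set", as implemented by
 * CPUMIsGuestIn64BitCodeEx):
 *
 *     long mode, CS.L=1                     -> IEMMODE_64BIT
 *     protected/compatibility mode, CS.D=1  -> IEMMODE_32BIT
 *     real mode or 16-bit protected mode    -> IEMMODE_16BIT
 */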
184
185#ifdef VBOX_INCLUDED_vmm_dbgf_h /* dbgf.ro.cEnabledHwBreakpoints */
186/**
187 * Initializes the execution state.
188 *
189 * @param pVCpu The cross context virtual CPU structure of the
190 * calling thread.
191 * @param fBypassHandlers Whether to bypass access handlers.
192 *
193 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
194 * side-effects in strict builds.
195 */
196DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, bool fBypassHandlers) RT_NOEXCEPT
197{
198 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
199 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
200 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
201 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
202 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
205 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
206 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
207 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
208
209 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
210 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
211# ifdef VBOX_STRICT
212 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
213 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
214 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
215 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
216 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
217 pVCpu->iem.s.uRexReg = 127;
218 pVCpu->iem.s.uRexB = 127;
219 pVCpu->iem.s.offModRm = 127;
220 pVCpu->iem.s.uRexIndex = 127;
221 pVCpu->iem.s.iEffSeg = 127;
222 pVCpu->iem.s.idxPrefix = 127;
223 pVCpu->iem.s.uVex3rdReg = 127;
224 pVCpu->iem.s.uVexLength = 127;
225 pVCpu->iem.s.fEvexStuff = 127;
226 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
227# ifdef IEM_WITH_CODE_TLB
228 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
229 pVCpu->iem.s.pbInstrBuf = NULL;
230 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
231 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
232 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
233 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
234# else
235 pVCpu->iem.s.offOpcode = 127;
236 pVCpu->iem.s.cbOpcode = 127;
237# endif
238# endif /* VBOX_STRICT */
239
240 pVCpu->iem.s.cActiveMappings = 0;
241 pVCpu->iem.s.iNextMapping = 0;
242 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
243 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
244 pVCpu->iem.s.fDisregardLock = false;
245 pVCpu->iem.s.fPendingInstructionBreakpoints = false;
246 pVCpu->iem.s.fPendingDataBreakpoints = false;
247 pVCpu->iem.s.fPendingIoBreakpoints = false;
248 if (RT_LIKELY( !(pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)
249 && pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledHwBreakpoints == 0))
250 { /* likely */ }
251 else
252 iemInitPendingBreakpointsSlow(pVCpu);
253}
254#endif /* VBOX_INCLUDED_vmm_dbgf_h */
255
256
257#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
258/**
259 * Performs a minimal reinitialization of the execution state.
260 *
261 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
262 * 'world-switch' type operations on the CPU. Currently only nested
263 * hardware-virtualization uses it.
264 *
265 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
266 */
267DECLINLINE(void) iemReInitExec(PVMCPUCC pVCpu) RT_NOEXCEPT
268{
269 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
270 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
271
272 pVCpu->iem.s.uCpl = uCpl;
273 pVCpu->iem.s.enmCpuMode = enmMode;
274 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
275 pVCpu->iem.s.enmEffAddrMode = enmMode;
276 if (enmMode != IEMMODE_64BIT)
277 {
278 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
279 pVCpu->iem.s.enmEffOpSize = enmMode;
280 }
281 else
282 {
283 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
284 pVCpu->iem.s.enmEffOpSize = enmMode;
285 }
286 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
287# ifndef IEM_WITH_CODE_TLB
288 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
289 pVCpu->iem.s.offOpcode = 0;
290 pVCpu->iem.s.cbOpcode = 0;
291# endif
292 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
293}
294#endif
295
296/**
297 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
298 *
299 * @param pVCpu The cross context virtual CPU structure of the
300 * calling thread.
301 */
302DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu) RT_NOEXCEPT
303{
304 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
305#ifdef VBOX_STRICT
306# ifdef IEM_WITH_CODE_TLB
307 NOREF(pVCpu);
308# else
309 pVCpu->iem.s.cbOpcode = 0;
310# endif
311#else
312 NOREF(pVCpu);
313#endif
314}
315
316
317/**
318 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
319 *
320 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
321 *
322 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
324 * @param rcStrict The status code to fiddle.
325 */
326DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
327{
328 iemUninitExec(pVCpu);
329 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
330}
331
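/*
 * Example: a rough sketch of how an IEMExec* entry point typically pairs these
 * helpers (illustrative only; the real bodies in IEMAll.cpp do considerably
 * more, and iemExecOneWorker below is a hypothetical placeholder name):
 *
 *     iemInitExec(pVCpu, false);                            // don't bypass access handlers
 *     VBOXSTRICTRC rcStrict = iemExecOneWorker(pVCpu);      // decode + execute one instruction
 *     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
 */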
332
333/**
334 * Macro used by the IEMExec* methods to check the given instruction length.
335 *
336 * Will return on failure!
337 *
338 * @param a_cbInstr The given instruction length.
339 * @param a_cbMin The minimum length.
340 */
341#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
342 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
343 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
344
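/*
 * The single unsigned comparison above checks a_cbMin <= a_cbInstr <= 15 (the
 * architectural maximum instruction length) in one go: if a_cbInstr is below
 * a_cbMin, the subtraction wraps to a huge unsigned value and the test fails.
 * Worked example with a_cbMin = 1 (illustrative only):
 *
 *     a_cbInstr = 0   -> (unsigned)(0 - 1) = UINT_MAX > 14  -> VERR_IEM_INVALID_INSTR_LENGTH
 *     a_cbInstr = 1   -> 0  <= 14                           -> OK
 *     a_cbInstr = 15  -> 14 <= 14                           -> OK
 *     a_cbInstr = 16  -> 15 >  14                           -> VERR_IEM_INVALID_INSTR_LENGTH
 */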
345
346#ifndef IEM_WITH_SETJMP
347
348/**
349 * Fetches the first opcode byte.
350 *
351 * @returns Strict VBox status code.
352 * @param pVCpu The cross context virtual CPU structure of the
353 * calling thread.
354 * @param pu8 Where to return the opcode byte.
355 */
356DECLINLINE(VBOXSTRICTRC) iemOpcodeGetFirstU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
357{
358 /*
359 * Check for hardware instruction breakpoints.
360 */
361 if (RT_LIKELY(!pVCpu->iem.s.fPendingInstructionBreakpoints))
362 { /* likely */ }
363 else
364 {
365 VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
366 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
367 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
368 { /* likely */ }
369 else if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
370 return iemRaiseDebugException(pVCpu);
371 else
372 return rcStrict;
373 }
374
375 /*
376 * Fetch the first opcode byte.
377 */
378 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
379 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
380 {
381 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
382 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
383 return VINF_SUCCESS;
384 }
385 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
386}
387
388#else /* IEM_WITH_SETJMP */
389
390/**
391 * Fetches the first opcode byte, longjmp on error.
392 *
393 * @returns The opcode byte.
394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
395 */
396DECL_INLINE_THROW(uint8_t) iemOpcodeGetFirstU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
397{
398 /*
399 * Check for hardware instruction breakpoints.
400 */
401 if (RT_LIKELY(!pVCpu->iem.s.fPendingInstructionBreakpoints))
402 { /* likely */ }
403 else
404 {
405 VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
406 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
407 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
408 { /* likely */ }
409 else
410 {
411 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
412 rcStrict = iemRaiseDebugException(pVCpu);
413 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
414 }
415 }
416
417 /*
418 * Fetch the first opcode byte.
419 */
420# ifdef IEM_WITH_CODE_TLB
421 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
422 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
423 if (RT_LIKELY( pbBuf != NULL
424 && offBuf < pVCpu->iem.s.cbInstrBuf))
425 {
426 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
427 return pbBuf[offBuf];
428 }
429# else
430 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
431 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
432 {
433 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
434 return pVCpu->iem.s.abOpcode[offOpcode];
435 }
436# endif
437 return iemOpcodeGetNextU8SlowJmp(pVCpu);
438}
439
440#endif /* IEM_WITH_SETJMP */
441
442/**
443 * Fetches the first opcode byte, returns/throws automatically on failure.
444 *
445 * @param a_pu8 Where to return the opcode byte.
446 * @remark Implicitly references pVCpu.
447 */
448#ifndef IEM_WITH_SETJMP
449# define IEM_OPCODE_GET_FIRST_U8(a_pu8) \
450 do \
451 { \
452 VBOXSTRICTRC rcStrict2 = iemOpcodeGetFirstU8(pVCpu, (a_pu8)); \
453 if (rcStrict2 == VINF_SUCCESS) \
454 { /* likely */ } \
455 else \
456 return rcStrict2; \
457 } while (0)
458#else
459# define IEM_OPCODE_GET_FIRST_U8(a_pu8) (*(a_pu8) = iemOpcodeGetFirstU8Jmp(pVCpu))
460#endif /* IEM_WITH_SETJMP */
461
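/*
 * Example: how a decoder function typically consumes these macros (illustrative
 * sketch, not from this file). In the non-setjmp build the hidden
 * 'return rcStrict2' is why they may only be used in functions returning
 * VBOXSTRICTRC:
 *
 *     uint8_t bOpcode;
 *     IEM_OPCODE_GET_FIRST_U8(&bOpcode);   // returns / longjmps on fetch failure
 *     uint8_t bRm;
 *     IEM_OPCODE_GET_NEXT_RM(&bRm);        // also records offModRm for VT-x exit info
 */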
462
463#ifndef IEM_WITH_SETJMP
464
465/**
466 * Fetches the next opcode byte.
467 *
468 * @returns Strict VBox status code.
469 * @param pVCpu The cross context virtual CPU structure of the
470 * calling thread.
471 * @param pu8 Where to return the opcode byte.
472 */
473DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
474{
475 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
476 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
477 {
478 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
479 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
480 return VINF_SUCCESS;
481 }
482 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
483}
484
485#else /* IEM_WITH_SETJMP */
486
487/**
488 * Fetches the next opcode byte, longjmp on error.
489 *
490 * @returns The opcode byte.
491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
492 */
493DECL_INLINE_THROW(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
494{
495# ifdef IEM_WITH_CODE_TLB
496 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
497 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
498 if (RT_LIKELY( pbBuf != NULL
499 && offBuf < pVCpu->iem.s.cbInstrBuf))
500 {
501 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
502 return pbBuf[offBuf];
503 }
504# else
505 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
506 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
507 {
508 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
509 return pVCpu->iem.s.abOpcode[offOpcode];
510 }
511# endif
512 return iemOpcodeGetNextU8SlowJmp(pVCpu);
513}
514
515#endif /* IEM_WITH_SETJMP */
516
517/**
518 * Fetches the next opcode byte, returns automatically on failure.
519 *
520 * @param a_pu8 Where to return the opcode byte.
521 * @remark Implicitly references pVCpu.
522 */
523#ifndef IEM_WITH_SETJMP
524# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
525 do \
526 { \
527 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
528 if (rcStrict2 == VINF_SUCCESS) \
529 { /* likely */ } \
530 else \
531 return rcStrict2; \
532 } while (0)
533#else
534# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
535#endif /* IEM_WITH_SETJMP */
536
537
538#ifndef IEM_WITH_SETJMP
539/**
540 * Fetches the next signed byte from the opcode stream.
541 *
542 * @returns Strict VBox status code.
543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
544 * @param pi8 Where to return the signed byte.
545 */
546DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8) RT_NOEXCEPT
547{
548 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
549}
550#endif /* !IEM_WITH_SETJMP */
551
552
553/**
554 * Fetches the next signed byte from the opcode stream, returning automatically
555 * on failure.
556 *
557 * @param a_pi8 Where to return the signed byte.
558 * @remark Implicitly references pVCpu.
559 */
560#ifndef IEM_WITH_SETJMP
561# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
562 do \
563 { \
564 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
565 if (rcStrict2 != VINF_SUCCESS) \
566 return rcStrict2; \
567 } while (0)
568#else /* IEM_WITH_SETJMP */
569# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
570
571#endif /* IEM_WITH_SETJMP */
572
573
574#ifndef IEM_WITH_SETJMP
575/**
576 * Fetches the next signed byte from the opcode stream, extending it to
577 * unsigned 16-bit.
578 *
579 * @returns Strict VBox status code.
580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
581 * @param pu16 Where to return the unsigned word.
582 */
583DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
584{
585 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
586 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
587 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
588
589 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
590 pVCpu->iem.s.offOpcode = offOpcode + 1;
591 return VINF_SUCCESS;
592}
593#endif /* !IEM_WITH_SETJMP */
594
595/**
596 * Fetches the next signed byte from the opcode stream, sign-extending it to
597 * a word and returning automatically on failure.
598 *
599 * @param a_pu16 Where to return the word.
600 * @remark Implicitly references pVCpu.
601 */
602#ifndef IEM_WITH_SETJMP
603# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
604 do \
605 { \
606 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
607 if (rcStrict2 != VINF_SUCCESS) \
608 return rcStrict2; \
609 } while (0)
610#else
611# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
612#endif
613
614#ifndef IEM_WITH_SETJMP
615/**
616 * Fetches the next signed byte from the opcode stream, extending it to
617 * unsigned 32-bit.
618 *
619 * @returns Strict VBox status code.
620 * @param pVCpu The cross context virtual CPU structure of the calling thread.
621 * @param pu32 Where to return the unsigned dword.
622 */
623DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
624{
625 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
626 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
627 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
628
629 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
630 pVCpu->iem.s.offOpcode = offOpcode + 1;
631 return VINF_SUCCESS;
632}
633#endif /* !IEM_WITH_SETJMP */
634
635/**
636 * Fetches the next signed byte from the opcode stream, sign-extending it to
637 * a double word and returning automatically on failure.
638 *
639 * @param a_pu32 Where to return the double word.
640 * @remark Implicitly references pVCpu.
641 */
642#ifndef IEM_WITH_SETJMP
643# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
644 do \
645 { \
646 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
647 if (rcStrict2 != VINF_SUCCESS) \
648 return rcStrict2; \
649 } while (0)
650#else
651# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
652#endif
653
654
655#ifndef IEM_WITH_SETJMP
656/**
657 * Fetches the next signed byte from the opcode stream, extending it to
658 * unsigned 64-bit.
659 *
660 * @returns Strict VBox status code.
661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
662 * @param pu64 Where to return the unsigned qword.
663 */
664DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
665{
666 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
667 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
668 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
669
670 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
671 pVCpu->iem.s.offOpcode = offOpcode + 1;
672 return VINF_SUCCESS;
673}
674#endif /* !IEM_WITH_SETJMP */
675
676/**
677 * Fetches the next signed byte from the opcode stream, sign-extending it to
678 * a quad word and returning automatically on failure.
679 *
680 * @param a_pu64 Where to return the quad word.
681 * @remark Implicitly references pVCpu.
682 */
683#ifndef IEM_WITH_SETJMP
684# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
685 do \
686 { \
687 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
688 if (rcStrict2 != VINF_SUCCESS) \
689 return rcStrict2; \
690 } while (0)
691#else
692# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
693#endif
694
695
696#ifndef IEM_WITH_SETJMP
697/**
698 * Fetches the next opcode byte, which is a ModR/M byte, and records its offset.
699 *
700 * @returns Strict VBox status code.
701 * @param pVCpu The cross context virtual CPU structure of the
702 * calling thread.
703 * @param pu8 Where to return the opcode byte.
704 */
705DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
706{
707 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
708 pVCpu->iem.s.offModRm = offOpcode;
709 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
710 {
711 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
712 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
713 return VINF_SUCCESS;
714 }
715 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
716}
717#else /* IEM_WITH_SETJMP */
718/**
719 * Fetches the next opcode byte, which is a ModR/M byte, and records its offset; longjmp on error.
720 *
721 * @returns The opcode byte.
722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
723 */
724DECL_INLINE_THROW(uint8_t) iemOpcodeGetNextRmJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
725{
726# ifdef IEM_WITH_CODE_TLB
727 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
728 pVCpu->iem.s.offModRm = offBuf;
729 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
730 if (RT_LIKELY( pbBuf != NULL
731 && offBuf < pVCpu->iem.s.cbInstrBuf))
732 {
733 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
734 return pbBuf[offBuf];
735 }
736# else
737 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
738 pVCpu->iem.s.offModRm = offOpcode;
739 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
740 {
741 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
742 return pVCpu->iem.s.abOpcode[offOpcode];
743 }
744# endif
745 return iemOpcodeGetNextU8SlowJmp(pVCpu);
746}
747#endif /* IEM_WITH_SETJMP */
748
749/**
750 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
751 * on failure.
752 *
753 * Will note down the position of the ModR/M byte for VT-x exits.
754 *
755 * @param a_pbRm Where to return the RM opcode byte.
756 * @remark Implicitly references pVCpu.
757 */
758#ifndef IEM_WITH_SETJMP
759# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
760 do \
761 { \
762 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
763 if (rcStrict2 == VINF_SUCCESS) \
764 { /* likely */ } \
765 else \
766 return rcStrict2; \
767 } while (0)
768#else
769# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
770#endif /* IEM_WITH_SETJMP */
771
772
773#ifndef IEM_WITH_SETJMP
774
775/**
776 * Fetches the next opcode word.
777 *
778 * @returns Strict VBox status code.
779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
780 * @param pu16 Where to return the opcode word.
781 */
782DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
783{
784 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
785 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
786 {
787 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
788# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
789 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
790# else
791 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
792# endif
793 return VINF_SUCCESS;
794 }
795 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
796}
797
798#else /* IEM_WITH_SETJMP */
799
800/**
801 * Fetches the next opcode word, longjmp on error.
802 *
803 * @returns The opcode word.
804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
805 */
806DECL_INLINE_THROW(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
807{
808# ifdef IEM_WITH_CODE_TLB
809 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
810 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
811 if (RT_LIKELY( pbBuf != NULL
812 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
813 {
814 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
815# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
816 return *(uint16_t const *)&pbBuf[offBuf];
817# else
818 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
819# endif
820 }
821# else /* !IEM_WITH_CODE_TLB */
822 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
823 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
824 {
825 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
826# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
827 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
828# else
829 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
830# endif
831 }
832# endif /* !IEM_WITH_CODE_TLB */
833 return iemOpcodeGetNextU16SlowJmp(pVCpu);
834}
835
836#endif /* IEM_WITH_SETJMP */
837
838/**
839 * Fetches the next opcode word, returns automatically on failure.
840 *
841 * @param a_pu16 Where to return the opcode word.
842 * @remark Implicitly references pVCpu.
843 */
844#ifndef IEM_WITH_SETJMP
845# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
846 do \
847 { \
848 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
849 if (rcStrict2 != VINF_SUCCESS) \
850 return rcStrict2; \
851 } while (0)
852#else
853# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
854#endif
855
856#ifndef IEM_WITH_SETJMP
857/**
858 * Fetches the next opcode word, zero extending it to a double word.
859 *
860 * @returns Strict VBox status code.
861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
862 * @param pu32 Where to return the opcode double word.
863 */
864DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
865{
866 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
867 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
868 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
869
870 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
871 pVCpu->iem.s.offOpcode = offOpcode + 2;
872 return VINF_SUCCESS;
873}
874#endif /* !IEM_WITH_SETJMP */
875
876/**
877 * Fetches the next opcode word and zero extends it to a double word, returns
878 * automatically on failure.
879 *
880 * @param a_pu32 Where to return the opcode double word.
881 * @remark Implicitly references pVCpu.
882 */
883#ifndef IEM_WITH_SETJMP
884# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
885 do \
886 { \
887 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
888 if (rcStrict2 != VINF_SUCCESS) \
889 return rcStrict2; \
890 } while (0)
891#else
892# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
893#endif
894
895#ifndef IEM_WITH_SETJMP
896/**
897 * Fetches the next opcode word, zero extending it to a quad word.
898 *
899 * @returns Strict VBox status code.
900 * @param pVCpu The cross context virtual CPU structure of the calling thread.
901 * @param pu64 Where to return the opcode quad word.
902 */
903DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
904{
905 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
906 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
907 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
908
909 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
910 pVCpu->iem.s.offOpcode = offOpcode + 2;
911 return VINF_SUCCESS;
912}
913#endif /* !IEM_WITH_SETJMP */
914
915/**
916 * Fetches the next opcode word and zero extends it to a quad word, returns
917 * automatically on failure.
918 *
919 * @param a_pu64 Where to return the opcode quad word.
920 * @remark Implicitly references pVCpu.
921 */
922#ifndef IEM_WITH_SETJMP
923# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
924 do \
925 { \
926 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
927 if (rcStrict2 != VINF_SUCCESS) \
928 return rcStrict2; \
929 } while (0)
930#else
931# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
932#endif
933
934
935#ifndef IEM_WITH_SETJMP
936/**
937 * Fetches the next signed word from the opcode stream.
938 *
939 * @returns Strict VBox status code.
940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
941 * @param pi16 Where to return the signed word.
942 */
943DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16) RT_NOEXCEPT
944{
945 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
946}
947#endif /* !IEM_WITH_SETJMP */
948
949
950/**
951 * Fetches the next signed word from the opcode stream, returning automatically
952 * on failure.
953 *
954 * @param a_pi16 Where to return the signed word.
955 * @remark Implicitly references pVCpu.
956 */
957#ifndef IEM_WITH_SETJMP
958# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
959 do \
960 { \
961 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
962 if (rcStrict2 != VINF_SUCCESS) \
963 return rcStrict2; \
964 } while (0)
965#else
966# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
967#endif
968
969#ifndef IEM_WITH_SETJMP
970
971/**
972 * Fetches the next opcode dword.
973 *
974 * @returns Strict VBox status code.
975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
976 * @param pu32 Where to return the opcode double word.
977 */
978DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
979{
980 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
981 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
982 {
983 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
984# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
985 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
986# else
987 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
988 pVCpu->iem.s.abOpcode[offOpcode + 1],
989 pVCpu->iem.s.abOpcode[offOpcode + 2],
990 pVCpu->iem.s.abOpcode[offOpcode + 3]);
991# endif
992 return VINF_SUCCESS;
993 }
994 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
995}
996
997#else /* IEM_WITH_SETJMP */
998
999/**
1000 * Fetches the next opcode dword, longjmp on error.
1001 *
1002 * @returns The opcode dword.
1003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1004 */
1005DECL_INLINE_THROW(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1006{
1007# ifdef IEM_WITH_CODE_TLB
1008 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1009 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1010 if (RT_LIKELY( pbBuf != NULL
1011 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
1012 {
1013 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
1014# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1015 return *(uint32_t const *)&pbBuf[offBuf];
1016# else
1017 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
1018 pbBuf[offBuf + 1],
1019 pbBuf[offBuf + 2],
1020 pbBuf[offBuf + 3]);
1021# endif
1022 }
1023# else
1024 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1025 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
1026 {
1027 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
1028# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1029 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1030# else
1031 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1032 pVCpu->iem.s.abOpcode[offOpcode + 1],
1033 pVCpu->iem.s.abOpcode[offOpcode + 2],
1034 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1035# endif
1036 }
1037# endif
1038 return iemOpcodeGetNextU32SlowJmp(pVCpu);
1039}
1040
1041#endif /* IEM_WITH_SETJMP */
1042
1043/**
1044 * Fetches the next opcode dword, returns automatically on failure.
1045 *
1046 * @param a_pu32 Where to return the opcode dword.
1047 * @remark Implicitly references pVCpu.
1048 */
1049#ifndef IEM_WITH_SETJMP
1050# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1051 do \
1052 { \
1053 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
1054 if (rcStrict2 != VINF_SUCCESS) \
1055 return rcStrict2; \
1056 } while (0)
1057#else
1058# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
1059#endif
1060
1061#ifndef IEM_WITH_SETJMP
1062/**
1063 * Fetches the next opcode dword, zero extending it to a quad word.
1064 *
1065 * @returns Strict VBox status code.
1066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1067 * @param pu64 Where to return the opcode quad word.
1068 */
1069DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1070{
1071 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1072 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
1073 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
1074
1075 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1076 pVCpu->iem.s.abOpcode[offOpcode + 1],
1077 pVCpu->iem.s.abOpcode[offOpcode + 2],
1078 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1079 pVCpu->iem.s.offOpcode = offOpcode + 4;
1080 return VINF_SUCCESS;
1081}
1082#endif /* !IEM_WITH_SETJMP */
1083
1084/**
1085 * Fetches the next opcode dword and zero extends it to a quad word, returns
1086 * automatically on failure.
1087 *
1088 * @param a_pu64 Where to return the opcode quad word.
1089 * @remark Implicitly references pVCpu.
1090 */
1091#ifndef IEM_WITH_SETJMP
1092# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1093 do \
1094 { \
1095 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
1096 if (rcStrict2 != VINF_SUCCESS) \
1097 return rcStrict2; \
1098 } while (0)
1099#else
1100# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
1101#endif
1102
1103
1104#ifndef IEM_WITH_SETJMP
1105/**
1106 * Fetches the next signed double word from the opcode stream.
1107 *
1108 * @returns Strict VBox status code.
1109 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1110 * @param pi32 Where to return the signed double word.
1111 */
1112DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32) RT_NOEXCEPT
1113{
1114 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
1115}
1116#endif
1117
1118/**
1119 * Fetches the next signed double word from the opcode stream, returning
1120 * automatically on failure.
1121 *
1122 * @param a_pi32 Where to return the signed double word.
1123 * @remark Implicitly references pVCpu.
1124 */
1125#ifndef IEM_WITH_SETJMP
1126# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1127 do \
1128 { \
1129 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
1130 if (rcStrict2 != VINF_SUCCESS) \
1131 return rcStrict2; \
1132 } while (0)
1133#else
1134# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
1135#endif
1136
1137#ifndef IEM_WITH_SETJMP
1138/**
1139 * Fetches the next opcode dword, sign extending it into a quad word.
1140 *
1141 * @returns Strict VBox status code.
1142 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1143 * @param pu64 Where to return the opcode quad word.
1144 */
1145DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1146{
1147 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1148 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
1149 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
1150
1151 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1152 pVCpu->iem.s.abOpcode[offOpcode + 1],
1153 pVCpu->iem.s.abOpcode[offOpcode + 2],
1154 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1155 *pu64 = i32;
1156 pVCpu->iem.s.offOpcode = offOpcode + 4;
1157 return VINF_SUCCESS;
1158}
1159#endif /* !IEM_WITH_SETJMP */
1160
1161/**
1162 * Fetches the next opcode double word and sign extends it to a quad word,
1163 * returns automatically on failure.
1164 *
1165 * @param a_pu64 Where to return the opcode quad word.
1166 * @remark Implicitly references pVCpu.
1167 */
1168#ifndef IEM_WITH_SETJMP
1169# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1170 do \
1171 { \
1172 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
1173 if (rcStrict2 != VINF_SUCCESS) \
1174 return rcStrict2; \
1175 } while (0)
1176#else
1177# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
1178#endif
1179
1180#ifndef IEM_WITH_SETJMP
1181
1182/**
1183 * Fetches the next opcode qword.
1184 *
1185 * @returns Strict VBox status code.
1186 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1187 * @param pu64 Where to return the opcode qword.
1188 */
1189DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1190{
1191 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1192 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
1193 {
1194# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1195 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1196# else
1197 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1198 pVCpu->iem.s.abOpcode[offOpcode + 1],
1199 pVCpu->iem.s.abOpcode[offOpcode + 2],
1200 pVCpu->iem.s.abOpcode[offOpcode + 3],
1201 pVCpu->iem.s.abOpcode[offOpcode + 4],
1202 pVCpu->iem.s.abOpcode[offOpcode + 5],
1203 pVCpu->iem.s.abOpcode[offOpcode + 6],
1204 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1205# endif
1206 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
1207 return VINF_SUCCESS;
1208 }
1209 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
1210}
1211
1212#else /* IEM_WITH_SETJMP */
1213
1214/**
1215 * Fetches the next opcode qword, longjmp on error.
1216 *
1217 * @returns The opcode qword.
1218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1219 */
1220DECL_INLINE_THROW(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1221{
1222# ifdef IEM_WITH_CODE_TLB
1223 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1224 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1225 if (RT_LIKELY( pbBuf != NULL
1226 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
1227 {
1228 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
1229# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1230 return *(uint64_t const *)&pbBuf[offBuf];
1231# else
1232 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
1233 pbBuf[offBuf + 1],
1234 pbBuf[offBuf + 2],
1235 pbBuf[offBuf + 3],
1236 pbBuf[offBuf + 4],
1237 pbBuf[offBuf + 5],
1238 pbBuf[offBuf + 6],
1239 pbBuf[offBuf + 7]);
1240# endif
1241 }
1242# else
1243 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1244 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
1245 {
1246 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
1247# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1248 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1249# else
1250 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1251 pVCpu->iem.s.abOpcode[offOpcode + 1],
1252 pVCpu->iem.s.abOpcode[offOpcode + 2],
1253 pVCpu->iem.s.abOpcode[offOpcode + 3],
1254 pVCpu->iem.s.abOpcode[offOpcode + 4],
1255 pVCpu->iem.s.abOpcode[offOpcode + 5],
1256 pVCpu->iem.s.abOpcode[offOpcode + 6],
1257 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1258# endif
1259 }
1260# endif
1261 return iemOpcodeGetNextU64SlowJmp(pVCpu);
1262}
1263
1264#endif /* IEM_WITH_SETJMP */
1265
1266/**
1267 * Fetches the next opcode quad word, returns automatically on failure.
1268 *
1269 * @param a_pu64 Where to return the opcode quad word.
1270 * @remark Implicitly references pVCpu.
1271 */
1272#ifndef IEM_WITH_SETJMP
1273# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1274 do \
1275 { \
1276 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
1277 if (rcStrict2 != VINF_SUCCESS) \
1278 return rcStrict2; \
1279 } while (0)
1280#else
1281# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
1282#endif
1283
1284
1285/** @name Misc Worker Functions.
1286 * @{
1287 */
1288
1289/**
1290 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1291 * not (kind of obsolete now).
1292 *
1293 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1294 */
1295#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
1296
1297/**
1298 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
1299 *
1300 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1301 * @param a_fEfl The new EFLAGS.
1302 */
1303#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
1304
1305
1306/**
1307 * Loads a NULL data selector into a selector register, both the hidden and
1308 * visible parts, in protected mode.
1309 *
1310 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1311 * @param pSReg Pointer to the segment register.
1312 * @param uRpl The RPL.
1313 */
1314DECLINLINE(void) iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl) RT_NOEXCEPT
1315{
1316 /** @todo Testcase: write a testcase checking what happens when loading a NULL
1317 * data selector in protected mode. */
1318 pSReg->Sel = uRpl;
1319 pSReg->ValidSel = uRpl;
1320 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1321 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1322 {
1323 /* VT-x (Intel 3960x) observed doing something like this. */
1324 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
1325 pSReg->u32Limit = UINT32_MAX;
1326 pSReg->u64Base = 0;
1327 }
1328 else
1329 {
1330 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
1331 pSReg->u32Limit = 0;
1332 pSReg->u64Base = 0;
1333 }
1334}
1335
1336/** @} */
1337
1338
1339/*
1340 *
1341 * Helper routines.
1342 * Helper routines.
1343 * Helper routines.
1344 *
1345 */
1346
1347/**
1348 * Recalculates the effective operand size.
1349 *
1350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1351 */
1352DECLINLINE(void) iemRecalEffOpSize(PVMCPUCC pVCpu) RT_NOEXCEPT
1353{
1354 switch (pVCpu->iem.s.enmCpuMode)
1355 {
1356 case IEMMODE_16BIT:
1357 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
1358 break;
1359 case IEMMODE_32BIT:
1360 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
1361 break;
1362 case IEMMODE_64BIT:
1363 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
1364 {
1365 case 0:
1366 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
1367 break;
1368 case IEM_OP_PRF_SIZE_OP:
1369 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1370 break;
1371 case IEM_OP_PRF_SIZE_REX_W:
1372 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
1373 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1374 break;
1375 }
1376 break;
1377 default:
1378 AssertFailed();
1379 }
1380}
1381
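/*
 * Example: effective operand sizes produced by the logic above (illustrative only):
 *
 *     16-bit code, 0x66 prefix                   -> IEMMODE_32BIT
 *     32-bit code, 0x66 prefix                   -> IEMMODE_16BIT
 *     64-bit code, no size prefixes              -> the default (normally IEMMODE_32BIT)
 *     64-bit code, 0x66 only                     -> IEMMODE_16BIT
 *     64-bit code, REX.W (with or without 0x66)  -> IEMMODE_64BIT
 */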
1382
1383/**
1384 * Sets the default operand size to 64-bit and recalculates the effective
1385 * operand size.
1386 *
1387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1388 */
1389DECLINLINE(void) iemRecalEffOpSize64Default(PVMCPUCC pVCpu) RT_NOEXCEPT
1390{
1391 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
1392 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
1393 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
1394 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1395 else
1396 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1397}
1398
1399
1400/**
1401 * Sets the default operand size to 64-bit and recalculates the effective
1402 * operand size, with Intel ignoring any operand size prefix (AMD respects it).
1403 *
1404 * This is for the relative jumps.
1405 *
1406 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1407 */
1408DECLINLINE(void) iemRecalEffOpSize64DefaultAndIntelIgnoresOpSizePrefix(PVMCPUCC pVCpu) RT_NOEXCEPT
1409{
1410 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
1411 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
1412 if ( (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP
1413 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
1414 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1415 else
1416 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1417}
1418
1419
1420
1421
1422/** @name Register Access.
1423 * @{
1424 */
1425
1426/**
1427 * Gets a reference (pointer) to the specified hidden segment register.
1428 *
1429 * @returns Hidden register reference.
1430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1431 * @param iSegReg The segment register.
1432 */
1433DECLINLINE(PCPUMSELREG) iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1434{
1435 Assert(iSegReg < X86_SREG_COUNT);
1436 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1437 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1438
1439 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
1440 return pSReg;
1441}
1442
1443
1444/**
1445 * Ensures that the given hidden segment register is up to date.
1446 *
1447 * @returns Hidden register reference.
1448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1449 * @param pSReg The segment register.
1450 */
1451DECLINLINE(PCPUMSELREG) iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg) RT_NOEXCEPT
1452{
1453 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
1454 NOREF(pVCpu);
1455 return pSReg;
1456}
1457
1458
1459/**
1460 * Gets a reference (pointer) to the specified segment register (the selector
1461 * value).
1462 *
1463 * @returns Pointer to the selector variable.
1464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1465 * @param iSegReg The segment register.
1466 */
1467DECLINLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1468{
1469 Assert(iSegReg < X86_SREG_COUNT);
1470 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1471 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
1472}
1473
1474
1475/**
1476 * Fetches the selector value of a segment register.
1477 *
1478 * @returns The selector value.
1479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1480 * @param iSegReg The segment register.
1481 */
1482DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1483{
1484 Assert(iSegReg < X86_SREG_COUNT);
1485 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1486 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
1487}
1488
1489
1490/**
1491 * Fetches the base address value of a segment register.
1492 *
1493 * @returns The segment base address.
1494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1495 * @param iSegReg The segment register.
1496 */
1497DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1498{
1499 Assert(iSegReg < X86_SREG_COUNT);
1500 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1501 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
1502}
1503
1504
1505/**
1506 * Gets a reference (pointer) to the specified general purpose register.
1507 *
1508 * @returns Register reference.
1509 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1510 * @param iReg The general purpose register.
1511 */
1512DECLINLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1513{
1514 Assert(iReg < 16);
1515 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
1516}
1517
1518
1519/**
1520 * Gets a reference (pointer) to the specified 8-bit general purpose register.
1521 *
1522 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
1523 *
1524 * @returns Register reference.
1525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1526 * @param iReg The register.
1527 */
1528DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1529{
1530 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
1531 {
1532 Assert(iReg < 16);
1533 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
1534 }
1535 /* high 8-bit register. */
1536 Assert(iReg < 8);
1537 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
1538}
1539
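/*
 * Example: the high-byte special case above in practice (illustrative only):
 *
 *     iemGRegRefU8(pVCpu, 4);  // no REX prefix seen   -> &aGRegs[0].bHi, i.e. AH
 *     iemGRegRefU8(pVCpu, 4);  // any REX prefix seen  -> &aGRegs[4].u8,  i.e. SPL
 *     iemGRegRefU8(pVCpu, 1);  // always               -> &aGRegs[1].u8,  i.e. CL
 */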
1540
1541/**
1542 * Gets a reference (pointer) to the specified 16-bit general purpose register.
1543 *
1544 * @returns Register reference.
1545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1546 * @param iReg The register.
1547 */
1548DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1549{
1550 Assert(iReg < 16);
1551 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
1552}
1553
1554
1555/**
1556 * Gets a reference (pointer) to the specified 32-bit general purpose register.
1557 *
1558 * @returns Register reference.
1559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1560 * @param iReg The register.
1561 */
1562DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1563{
1564 Assert(iReg < 16);
1565 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1566}
1567
1568
1569/**
1570 * Gets a reference (pointer) to the specified signed 32-bit general purpose register.
1571 *
1572 * @returns Register reference.
1573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1574 * @param iReg The register.
1575 */
1576DECLINLINE(int32_t *) iemGRegRefI32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1577{
1578 Assert(iReg < 16);
1579 return (int32_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1580}
1581
1582
1583/**
1584 * Gets a reference (pointer) to the specified 64-bit general purpose register.
1585 *
1586 * @returns Register reference.
1587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1588 * @param iReg The register.
1589 */
1590DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1591{
1592 Assert(iReg < 16);
1593 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1594}
1595
1596
1597/**
1598 * Gets a reference (pointer) to the specified signed 64-bit general purpose register.
1599 *
1600 * @returns Register reference.
1601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1602 * @param iReg The register.
1603 */
1604DECLINLINE(int64_t *) iemGRegRefI64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1605{
1606 Assert(iReg < 16);
1607 return (int64_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1608}
1609
1610
1611/**
1612 * Gets a reference (pointer) to the specified segment register's base address.
1613 *
1614 * @returns Segment register base address reference.
1615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1616 * @param iSegReg The segment selector.
1617 */
1618DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1619{
1620 Assert(iSegReg < X86_SREG_COUNT);
1621 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1622 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
1623}
1624
1625
1626/**
1627 * Fetches the value of an 8-bit general purpose register.
1628 *
1629 * @returns The register value.
1630 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1631 * @param iReg The register.
1632 */
1633DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1634{
1635 return *iemGRegRefU8(pVCpu, iReg);
1636}
1637
1638
1639/**
1640 * Fetches the value of a 16-bit general purpose register.
1641 *
1642 * @returns The register value.
1643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1644 * @param iReg The register.
1645 */
1646DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1647{
1648 Assert(iReg < 16);
1649 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
1650}
1651
1652
1653/**
1654 * Fetches the value of a 32-bit general purpose register.
1655 *
1656 * @returns The register value.
1657 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1658 * @param iReg The register.
1659 */
1660DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1661{
1662 Assert(iReg < 16);
1663 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1664}
1665
1666
1667/**
1668 * Fetches the value of a 64-bit general purpose register.
1669 *
1670 * @returns The register value.
1671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1672 * @param iReg The register.
1673 */
1674DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1675{
1676 Assert(iReg < 16);
1677 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1678}
1679
1680
1681/**
1682 * Gets the address of the top of the stack.
1683 *
1684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1685 */
1686DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu) RT_NOEXCEPT
1687{
1688 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1689 return pVCpu->cpum.GstCtx.rsp;
1690 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1691 return pVCpu->cpum.GstCtx.esp;
1692 return pVCpu->cpum.GstCtx.sp;
1693}
1694
1695
1696/**
1697 * Updates the RIP/EIP/IP to point to the next instruction.
1698 *
1699 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1700 * @param cbInstr The number of bytes to add.
1701 */
1702DECL_FORCE_INLINE(void) iemRegAddToRip(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
1703{
1704 /*
1705 * Advance RIP.
1706 *
1707 * When we're targeting 8086/8, 80186/8 or 80286 mode the updates are 16-bit,
1708 * while in all other modes except LM64 the updates are 32-bit. This means
1709 * we need to watch for both 32-bit and 16-bit "carry" situations, i.e.
1710 * 4GB and 64KB rollovers, and decide whether anything needs masking.
1711 *
1712 * See PC wrap around tests in bs3-cpu-weird-1.
1713 */
1714 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
1715 uint64_t const uRipNext = uRipPrev + cbInstr;
1716 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & (RT_BIT_64(32) | RT_BIT_64(16)))
1717 || pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT))
1718 pVCpu->cpum.GstCtx.rip = uRipNext;
1719 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
1720 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
1721 else
1722 pVCpu->cpum.GstCtx.rip = (uint16_t)uRipNext;
1723}
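/* For illustration: executing a 2-byte instruction at IP=0xFFFF in 16-bit code gives
   uRipPrev=0x0000FFFF and uRipNext=0x00010001, so bit 16 differs between the two.
   Outside 64-bit mode the result is therefore masked above: to 0x00010001 on 386+
   targets (only the 4GB wrap matters there) and to 0x0001 on 8086/80186/80286 targets. */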
1724
1725
1726/**
1727 * Called by iemRegAddToRipAndFinishingClearingRF and others when any of the
1728 * following EFLAGS bits are set:
1729 * - X86_EFL_RF - clear it.
1730 * - CPUMCTX_INHIBIT_SHADOW (_SS/_STI) - clear them.
1731 * - X86_EFL_TF - generate single step \#DB trap.
1732 * - CPUMCTX_DBG_HIT_DR0/1/2/3 - generate \#DB trap (data or I/O, not
1733 * instruction).
1734 *
1735 * According to @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events},
1736 * a \#DB due to TF (single stepping) or a DRx non-instruction breakpoint
1737 * takes priority over both NMIs and hardware interrupts. So, neither is
1738 * considered here. (The RESET, \#MC, SMI, INIT, STOPCLK and FLUSH events are
1739 * either unsupported or will be triggered on top of any \#DB raised here.)
1740 *
1741 * The RF flag only needs to be cleared here as it only suppresses instruction
1742 * breakpoints which are not raised here (happens synchronously during
1743 * instruction fetching).
1744 *
1745 * The CPUMCTX_INHIBIT_SHADOW_SS flag will be cleared by this function, so its
1746 * status has no bearing on whether \#DB exceptions are raised.
1747 *
1748 * @note This must *NOT* be called by the two instructions setting the
1749 * CPUMCTX_INHIBIT_SHADOW_SS flag.
1750 *
1751 * @see @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events}
1752 * @see @sdmv3{077,200,6.8.3,Masking Exceptions and Interrupts When Switching
1753 * Stacks}
1754 */
1755static VBOXSTRICTRC iemFinishInstructionWithFlagsSet(PVMCPUCC pVCpu) RT_NOEXCEPT
1756{
1757 /*
1758 * Normally we're just here to clear RF and/or interrupt shadow bits.
1759 */
1760 if (RT_LIKELY((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) == 0))
1761 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
1762 else
1763 {
1764 /*
1765 * Raise a #DB or/and DBGF event.
1766 */
1767 VBOXSTRICTRC rcStrict;
1768 if (pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK))
1769 {
1770 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
1771 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
1772 if (pVCpu->cpum.GstCtx.eflags.uBoth & X86_EFL_TF)
1773 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS;
1774 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK) >> CPUMCTX_DBG_HIT_DRX_SHIFT;
1775 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64\n",
1776 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
1777 pVCpu->cpum.GstCtx.rflags.uBoth));
1778
1779 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK);
1780 rcStrict = iemRaiseDebugException(pVCpu);
1781
1782 /* A DBGF event/breakpoint trumps the iemRaiseDebugException informational status code. */
1783 if ((pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK) && RT_FAILURE(rcStrict))
1784 {
1785 rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
1786 LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
1787 }
1788 }
1789 else
1790 {
1791 Assert(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK);
1792 rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
1793 LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
1794 }
1795 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_DBG_DBGF_MASK;
1796 return rcStrict;
1797 }
1798 return VINF_SUCCESS;
1799}
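/* For illustration: with EFLAGS.TF set and a hit recorded for DR1, the code above first
   clears DR6.B0-B3, then sets DR6.BS and DR6.B1 before raising the \#DB; a pending DBGF
   breakpoint/event status is returned instead only if iemRaiseDebugException() failed. */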
1800
1801
1802/**
1803 * Clears the RF and CPUMCTX_INHIBIT_SHADOW, triggering \#DB if pending.
1804 *
1805 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1806 */
1807DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegFinishClearingRF(PVMCPUCC pVCpu) RT_NOEXCEPT
1808{
1809 /*
1810 * We assume that most of the time nothing actually needs doing here.
1811 */
1812 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
1813 if (RT_LIKELY(!( pVCpu->cpum.GstCtx.eflags.uBoth
1814 & (X86_EFL_TF | X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) ))
1815 return VINF_SUCCESS;
1816 return iemFinishInstructionWithFlagsSet(pVCpu);
1817}
1818
1819
1820/**
1821 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF
1822 * and CPUMCTX_INHIBIT_SHADOW.
1823 *
1824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1825 * @param cbInstr The number of bytes to add.
1826 */
1827DECLINLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
1828{
1829 iemRegAddToRip(pVCpu, cbInstr);
1830 return iemRegFinishClearingRF(pVCpu);
1831}
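/* Typical use (sketch): an instruction implementation that has carried out its side
   effects finishes with
       return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
   where cbInstr is the decoded instruction length. */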
1832
1833
1834/**
1835 * Extended version of iemFinishInstructionWithFlagsSet that goes with
1836 * iemRegAddToRipAndFinishingClearingRfEx.
1837 *
1838 * See iemFinishInstructionWithFlagsSet() for details.
1839 */
1840static VBOXSTRICTRC iemFinishInstructionWithTfSet(PVMCPUCC pVCpu) RT_NOEXCEPT
1841{
1842 /*
1843 * Raise a #DB.
1844 */
1845 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
1846 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
1847 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS
1848 | (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK) >> CPUMCTX_DBG_HIT_DRX_SHIFT;
1849 /** @todo Do we set all pending \#DB events, or just one? */
1850 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64 (popf)\n",
1851 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
1852 pVCpu->cpum.GstCtx.rflags.uBoth));
1853 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
1854 return iemRaiseDebugException(pVCpu);
1855}
1856
1857
1858/**
1859 * Extended version of iemRegAddToRipAndFinishingClearingRF for use by POPF and
1860 * others potentially updating EFLAGS.TF.
1861 *
1862 * The single step event must be generated using the TF value at the start of
1863 * the instruction, not the new value set by it.
1864 *
1865 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1866 * @param cbInstr The number of bytes to add.
1867 * @param fEflOld The EFLAGS at the start of the instruction
1868 * execution.
1869 */
1870DECLINLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRfEx(PVMCPUCC pVCpu, uint8_t cbInstr, uint32_t fEflOld) RT_NOEXCEPT
1871{
1872 iemRegAddToRip(pVCpu, cbInstr);
1873 if (!(fEflOld & X86_EFL_TF))
1874 return iemRegFinishClearingRF(pVCpu);
1875 return iemFinishInstructionWithTfSet(pVCpu);
1876}
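/* For illustration: a POPF that loads TF=1 while the old EFLAGS had TF=0 does not raise
   a \#DB here (fEflOld decides); conversely, a POPF that clears TF while the old EFLAGS
   had TF=1 still single-steps and raises the \#DB for the POPF itself. */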
1877
1878
1879/**
1880 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
1881 *
1882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1883 */
1884DECLINLINE(VBOXSTRICTRC) iemRegUpdateRipAndFinishClearingRF(PVMCPUCC pVCpu) RT_NOEXCEPT
1885{
1886 return iemRegAddToRipAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
1887}
1888
1889
1890/**
1891 * Adds to the stack pointer.
1892 *
1893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1894 * @param cbToAdd The number of bytes to add (8-bit!).
1895 */
1896DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd) RT_NOEXCEPT
1897{
1898 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1899 pVCpu->cpum.GstCtx.rsp += cbToAdd;
1900 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1901 pVCpu->cpum.GstCtx.esp += cbToAdd;
1902 else
1903 pVCpu->cpum.GstCtx.sp += cbToAdd;
1904}
1905
1906
1907/**
1908 * Subtracts from the stack pointer.
1909 *
1910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1911 * @param cbToSub The number of bytes to subtract (8-bit!).
1912 */
1913DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub) RT_NOEXCEPT
1914{
1915 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1916 pVCpu->cpum.GstCtx.rsp -= cbToSub;
1917 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1918 pVCpu->cpum.GstCtx.esp -= cbToSub;
1919 else
1920 pVCpu->cpum.GstCtx.sp -= cbToSub;
1921}
1922
1923
1924/**
1925 * Adds to the temporary stack pointer.
1926 *
1927 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1928 * @param pTmpRsp The temporary SP/ESP/RSP to update.
1929 * @param cbToAdd The number of bytes to add (16-bit).
1930 */
1931DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd) RT_NOEXCEPT
1932{
1933 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1934 pTmpRsp->u += cbToAdd;
1935 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1936 pTmpRsp->DWords.dw0 += cbToAdd;
1937 else
1938 pTmpRsp->Words.w0 += cbToAdd;
1939}
1940
1941
1942/**
1943 * Subtracts from the temporary stack pointer.
1944 *
1945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1946 * @param pTmpRsp The temporary SP/ESP/RSP to update.
1947 * @param cbToSub The number of bytes to subtract.
1948 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
1949 * expecting that.
1950 */
1951DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub) RT_NOEXCEPT
1952{
1953 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1954 pTmpRsp->u -= cbToSub;
1955 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1956 pTmpRsp->DWords.dw0 -= cbToSub;
1957 else
1958 pTmpRsp->Words.w0 -= cbToSub;
1959}
1960
1961
1962/**
1963 * Calculates the effective stack address for a push of the specified size as
1964 * well as the new RSP value (upper bits may be masked).
1965 *
1966 * @returns Effective stack address for the push.
1967 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1968 * @param cbItem    The size of the stack item to push.
1969 * @param puNewRsp Where to return the new RSP value.
1970 */
1971DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
1972{
1973 RTUINT64U uTmpRsp;
1974 RTGCPTR GCPtrTop;
1975 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
1976
1977 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1978 GCPtrTop = uTmpRsp.u -= cbItem;
1979 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1980 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
1981 else
1982 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
1983 *puNewRsp = uTmpRsp.u;
1984 return GCPtrTop;
1985}
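/* Worked example: on a 32-bit stack (SS.D=1) with RSP=0x1000, a 4 byte push yields
   GCPtrTop=0xFFC and *puNewRsp=0xFFC; only the low 32 bits of the temporary RSP are
   modified, any upper RSP bits are carried over unchanged. */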
1986
1987
1988/**
1989 * Gets the current stack pointer and calculates the value after a pop of the
1990 * specified size.
1991 *
1992 * @returns Current stack pointer.
1993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1994 * @param cbItem The size of the stack item to pop.
1995 * @param puNewRsp Where to return the new RSP value.
1996 */
1997DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
1998{
1999 RTUINT64U uTmpRsp;
2000 RTGCPTR GCPtrTop;
2001 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
2002
2003 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2004 {
2005 GCPtrTop = uTmpRsp.u;
2006 uTmpRsp.u += cbItem;
2007 }
2008 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2009 {
2010 GCPtrTop = uTmpRsp.DWords.dw0;
2011 uTmpRsp.DWords.dw0 += cbItem;
2012 }
2013 else
2014 {
2015 GCPtrTop = uTmpRsp.Words.w0;
2016 uTmpRsp.Words.w0 += cbItem;
2017 }
2018 *puNewRsp = uTmpRsp.u;
2019 return GCPtrTop;
2020}
2021
2022
2023/**
2024 * Calculates the effective stack address for a push of the specified size as
2025 * well as the new temporary RSP value (upper bits may be masked).
2026 *
2027 * @returns Effective stack address for the push.
2028 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2029 * @param pTmpRsp The temporary stack pointer. This is updated.
2030 * @param cbItem    The size of the stack item to push.
2031 */
2032DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
2033{
2034 RTGCPTR GCPtrTop;
2035
2036 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2037 GCPtrTop = pTmpRsp->u -= cbItem;
2038 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2039 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
2040 else
2041 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
2042 return GCPtrTop;
2043}
2044
2045
2046/**
2047 * Gets the effective stack address for a pop of the specified size and
2048 * calculates and updates the temporary RSP.
2049 *
2050 * @returns Current stack pointer.
2051 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2052 * @param pTmpRsp The temporary stack pointer. This is updated.
2053 * @param cbItem The size of the stack item to pop.
2054 */
2055DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
2056{
2057 RTGCPTR GCPtrTop;
2058 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2059 {
2060 GCPtrTop = pTmpRsp->u;
2061 pTmpRsp->u += cbItem;
2062 }
2063 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2064 {
2065 GCPtrTop = pTmpRsp->DWords.dw0;
2066 pTmpRsp->DWords.dw0 += cbItem;
2067 }
2068 else
2069 {
2070 GCPtrTop = pTmpRsp->Words.w0;
2071 pTmpRsp->Words.w0 += cbItem;
2072 }
2073 return GCPtrTop;
2074}
2075
2076/** @} */
2077
2078
2079/** @name FPU access and helpers.
2080 *
2081 * @{
2082 */
2083
2084
2085/**
2086 * Hook for preparing to use the host FPU.
2087 *
2088 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2089 *
2090 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2091 */
2092DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu) RT_NOEXCEPT
2093{
2094#ifdef IN_RING3
2095 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2096#else
2097 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
2098#endif
2099 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2100}
2101
2102
2103/**
2104 * Hook for preparing to use the host FPU for SSE.
2105 *
2106 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2107 *
2108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2109 */
2110DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu) RT_NOEXCEPT
2111{
2112 iemFpuPrepareUsage(pVCpu);
2113}
2114
2115
2116/**
2117 * Hook for preparing to use the host FPU for AVX.
2118 *
2119 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2120 *
2121 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2122 */
2123DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu) RT_NOEXCEPT
2124{
2125 iemFpuPrepareUsage(pVCpu);
2126}
2127
2128
2129/**
2130 * Hook for actualizing the guest FPU state before the interpreter reads it.
2131 *
2132 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2133 *
2134 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2135 */
2136DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2137{
2138#ifdef IN_RING3
2139 NOREF(pVCpu);
2140#else
2141 CPUMRZFpuStateActualizeForRead(pVCpu);
2142#endif
2143 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2144}
2145
2146
2147/**
2148 * Hook for actualizing the guest FPU state before the interpreter changes it.
2149 *
2150 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2151 *
2152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2153 */
2154DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2155{
2156#ifdef IN_RING3
2157 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2158#else
2159 CPUMRZFpuStateActualizeForChange(pVCpu);
2160#endif
2161 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2162}
2163
2164
2165/**
2166 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
2167 * only.
2168 *
2169 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2170 *
2171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2172 */
2173DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2174{
2175#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
2176 NOREF(pVCpu);
2177#else
2178 CPUMRZFpuStateActualizeSseForRead(pVCpu);
2179#endif
2180 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2181}
2182
2183
2184/**
2185 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
2186 * read+write.
2187 *
2188 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2189 *
2190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2191 */
2192DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2193{
2194#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
2195 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2196#else
2197 CPUMRZFpuStateActualizeForChange(pVCpu);
2198#endif
2199 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2200
2201 /* Make sure any changes are loaded the next time around. */
2202 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_SSE;
2203}
2204
2205
2206/**
2207 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
2208 * only.
2209 *
2210 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2211 *
2212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2213 */
2214DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2215{
2216#ifdef IN_RING3
2217 NOREF(pVCpu);
2218#else
2219 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
2220#endif
2221 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2222}
2223
2224
2225/**
2226 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
2227 * read+write.
2228 *
2229 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2230 *
2231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2232 */
2233DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2234{
2235#ifdef IN_RING3
2236 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2237#else
2238 CPUMRZFpuStateActualizeForChange(pVCpu);
2239#endif
2240 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2241
2242 /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
2243 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
2244}
2245
2246
2247/**
2248 * Stores a QNaN value into an FPU register.
2249 *
2250 * @param pReg Pointer to the register.
2251 */
2252DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg) RT_NOEXCEPT
2253{
2254 pReg->au32[0] = UINT32_C(0x00000000);
2255 pReg->au32[1] = UINT32_C(0xc0000000);
2256 pReg->au16[4] = UINT16_C(0xffff);
2257}
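/* For reference: the resulting 80-bit value is 0xFFFF:C000000000000000, i.e. the x87
   "real indefinite" QNaN (sign=1, exponent all ones, mantissa 0xC000000000000000). */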
2258
2259
2260/**
2261 * Updates the FOP, FPU.CS and FPUIP registers.
2262 *
2263 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2264 * @param pFpuCtx The FPU context.
2265 */
2266DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
2267{
2268 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
2269 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
2270    /** @todo x87.CS and FPUIP need to be kept separately. */
2271 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2272 {
2273        /** @todo Testcase: verify the assumptions made here about how FPUIP and FPUDP
2274         *        are handled in real mode, based on the fnsave and fnstenv images. */
2275 pFpuCtx->CS = 0;
2276 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
2277 }
2278 else if (!IEM_IS_LONG_MODE(pVCpu))
2279 {
2280 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
2281 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
2282 }
2283 else
2284 *(uint64_t *)&pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
2285}
2286
2287
2288
2289
2290
2291/**
2292 * Marks the specified stack register as free (for FFREE).
2293 *
2294 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2295 * @param iStReg The register to free.
2296 */
2297DECLINLINE(void) iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
2298{
2299 Assert(iStReg < 8);
2300 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2301 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
2302 pFpuCtx->FTW &= ~RT_BIT(iReg);
2303}
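/* For illustration: with FSW.TOP=6, FFREE ST(2) maps to physical register (6 + 2) & 7 = 0,
   so bit 0 of the compressed FTW is cleared. */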
2304
2305
2306/**
2307 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
2308 *
2309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2310 */
2311DECLINLINE(void) iemFpuStackIncTop(PVMCPUCC pVCpu) RT_NOEXCEPT
2312{
2313 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2314 uint16_t uFsw = pFpuCtx->FSW;
2315 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
2316 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
2317 uFsw &= ~X86_FSW_TOP_MASK;
2318 uFsw |= uTop;
2319 pFpuCtx->FSW = uFsw;
2320}
2321
2322
2323/**
2324 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
2325 *
2326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2327 */
2328DECLINLINE(void) iemFpuStackDecTop(PVMCPUCC pVCpu) RT_NOEXCEPT
2329{
2330 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2331 uint16_t uFsw = pFpuCtx->FSW;
2332 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
2333 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
2334 uFsw &= ~X86_FSW_TOP_MASK;
2335 uFsw |= uTop;
2336 pFpuCtx->FSW = uFsw;
2337}
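/* TOP lives in bits 11..13 of FSW and wraps modulo 8: e.g. incrementing from TOP=7
   gives TOP=0, while decrementing from TOP=0 (adding 7 above) gives TOP=7. */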
2338
2339
2340
2341
2342DECLINLINE(int) iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
2343{
2344 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2345 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
2346 if (pFpuCtx->FTW & RT_BIT(iReg))
2347 return VINF_SUCCESS;
2348 return VERR_NOT_FOUND;
2349}
2350
2351
2352DECLINLINE(int) iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef) RT_NOEXCEPT
2353{
2354 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2355 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
2356 if (pFpuCtx->FTW & RT_BIT(iReg))
2357 {
2358 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
2359 return VINF_SUCCESS;
2360 }
2361 return VERR_NOT_FOUND;
2362}
2363
2364
2365DECLINLINE(int) iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
2366 uint8_t iStReg1, PCRTFLOAT80U *ppRef1) RT_NOEXCEPT
2367{
2368 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2369 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2370 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
2371 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
2372 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
2373 {
2374 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
2375 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
2376 return VINF_SUCCESS;
2377 }
2378 return VERR_NOT_FOUND;
2379}
2380
2381
2382DECLINLINE(int) iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1) RT_NOEXCEPT
2383{
2384 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2385 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2386 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
2387 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
2388 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
2389 {
2390 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
2391 return VINF_SUCCESS;
2392 }
2393 return VERR_NOT_FOUND;
2394}
2395
2396
2397/**
2398 * Rotates the stack registers when setting new TOS.
2399 *
2400 * @param pFpuCtx The FPU context.
2401 * @param iNewTop New TOS value.
2402 * @remarks We only do this to speed up fxsave/fxrstor which
2403 * arrange the FP registers in stack order.
2404 * MUST be done before writing the new TOS (FSW).
2405 */
2406DECLINLINE(void) iemFpuRotateStackSetTop(PX86FXSTATE pFpuCtx, uint16_t iNewTop) RT_NOEXCEPT
2407{
2408 uint16_t iOldTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2409 RTFLOAT80U ar80Temp[8];
2410
2411 if (iOldTop == iNewTop)
2412 return;
2413
2414 /* Unscrew the stack and get it into 'native' order. */
2415 ar80Temp[0] = pFpuCtx->aRegs[(8 - iOldTop + 0) & X86_FSW_TOP_SMASK].r80;
2416 ar80Temp[1] = pFpuCtx->aRegs[(8 - iOldTop + 1) & X86_FSW_TOP_SMASK].r80;
2417 ar80Temp[2] = pFpuCtx->aRegs[(8 - iOldTop + 2) & X86_FSW_TOP_SMASK].r80;
2418 ar80Temp[3] = pFpuCtx->aRegs[(8 - iOldTop + 3) & X86_FSW_TOP_SMASK].r80;
2419 ar80Temp[4] = pFpuCtx->aRegs[(8 - iOldTop + 4) & X86_FSW_TOP_SMASK].r80;
2420 ar80Temp[5] = pFpuCtx->aRegs[(8 - iOldTop + 5) & X86_FSW_TOP_SMASK].r80;
2421 ar80Temp[6] = pFpuCtx->aRegs[(8 - iOldTop + 6) & X86_FSW_TOP_SMASK].r80;
2422 ar80Temp[7] = pFpuCtx->aRegs[(8 - iOldTop + 7) & X86_FSW_TOP_SMASK].r80;
2423
2424 /* Now rotate the stack to the new position. */
2425 pFpuCtx->aRegs[0].r80 = ar80Temp[(iNewTop + 0) & X86_FSW_TOP_SMASK];
2426 pFpuCtx->aRegs[1].r80 = ar80Temp[(iNewTop + 1) & X86_FSW_TOP_SMASK];
2427 pFpuCtx->aRegs[2].r80 = ar80Temp[(iNewTop + 2) & X86_FSW_TOP_SMASK];
2428 pFpuCtx->aRegs[3].r80 = ar80Temp[(iNewTop + 3) & X86_FSW_TOP_SMASK];
2429 pFpuCtx->aRegs[4].r80 = ar80Temp[(iNewTop + 4) & X86_FSW_TOP_SMASK];
2430 pFpuCtx->aRegs[5].r80 = ar80Temp[(iNewTop + 5) & X86_FSW_TOP_SMASK];
2431 pFpuCtx->aRegs[6].r80 = ar80Temp[(iNewTop + 6) & X86_FSW_TOP_SMASK];
2432 pFpuCtx->aRegs[7].r80 = ar80Temp[(iNewTop + 7) & X86_FSW_TOP_SMASK];
2433}
2434
2435
2436/**
2437 * Updates the FPU exception status after FCW is changed.
2438 *
2439 * @param pFpuCtx The FPU context.
2440 */
2441DECLINLINE(void) iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
2442{
2443 uint16_t u16Fsw = pFpuCtx->FSW;
2444 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
2445 u16Fsw |= X86_FSW_ES | X86_FSW_B;
2446 else
2447 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
2448 pFpuCtx->FSW = u16Fsw;
2449}
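/* For illustration: if FSW.IE is pending and FCW.IM is cleared (the exception becomes
   unmasked), ES and B get set; once every pending exception bit is masked again, both
   summary bits are cleared. */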
2450
2451
2452/**
2453 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
2454 *
2455 * @returns The full FTW.
2456 * @param pFpuCtx The FPU context.
2457 */
2458DECLINLINE(uint16_t) iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx) RT_NOEXCEPT
2459{
2460 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
2461 uint16_t u16Ftw = 0;
2462 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2463 for (unsigned iSt = 0; iSt < 8; iSt++)
2464 {
2465 unsigned const iReg = (iSt + iTop) & 7;
2466 if (!(u8Ftw & RT_BIT(iReg)))
2467 u16Ftw |= 3 << (iReg * 2); /* empty */
2468 else
2469 {
2470 uint16_t uTag;
2471 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
2472 if (pr80Reg->s.uExponent == 0x7fff)
2473 uTag = 2; /* Exponent is all 1's => Special. */
2474 else if (pr80Reg->s.uExponent == 0x0000)
2475 {
2476 if (pr80Reg->s.uMantissa == 0x0000)
2477 uTag = 1; /* All bits are zero => Zero. */
2478 else
2479 uTag = 2; /* Must be special. */
2480 }
2481 else if (pr80Reg->s.uMantissa & RT_BIT_64(63)) /* The J bit. */
2482 uTag = 0; /* Valid. */
2483 else
2484 uTag = 2; /* Must be special. */
2485
2486 u16Ftw |= uTag << (iReg * 2);
2487 }
2488 }
2489
2490 return u16Ftw;
2491}
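/* Tag encoding used above: 00=valid, 01=zero, 10=special (NaN/Inf/denormal), 11=empty.
   Example: with TOP=7 and only ST(0) holding 1.0, physical register 7 gets tag 00 and
   the rest get 11, so the full FTW comes out as 0x3fff. */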
2492
2493
2494/**
2495 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
2496 *
2497 * @returns The compressed FTW.
2498 * @param u16FullFtw The full FTW to convert.
2499 */
2500DECLINLINE(uint16_t) iemFpuCompressFtw(uint16_t u16FullFtw) RT_NOEXCEPT
2501{
2502 uint8_t u8Ftw = 0;
2503 for (unsigned i = 0; i < 8; i++)
2504 {
2505 if ((u16FullFtw & 3) != 3 /*empty*/)
2506 u8Ftw |= RT_BIT(i);
2507 u16FullFtw >>= 2;
2508 }
2509
2510 return u8Ftw;
2511}
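/* Example: compressing the full FTW 0x3fff from the example above (only physical
   register 7 non-empty) yields the one-bit-per-register form 0x80. */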
2512
2513/** @} */
2514
2515
2516/** @name Memory access.
2517 *
2518 * @{
2519 */
2520
2521
2522/**
2523 * Checks whether alignment checks are enabled or not.
2524 *
2525 * @returns true if enabled, false if not.
2526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2527 */
2528DECLINLINE(bool) iemMemAreAlignmentChecksEnabled(PVMCPUCC pVCpu) RT_NOEXCEPT
2529{
2530 AssertCompile(X86_CR0_AM == X86_EFL_AC);
2531 return pVCpu->iem.s.uCpl == 3
2532 && (((uint32_t)pVCpu->cpum.GstCtx.cr0 & pVCpu->cpum.GstCtx.eflags.u) & X86_CR0_AM);
2533}
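/* The AssertCompile documents the trick used above: X86_CR0_AM and X86_EFL_AC are both
   bit 18, so a single AND of CR0 and EFLAGS tests both at once.
   Example: CPL=3 with CR0.AM=1 and EFLAGS.AC=1 -> true; ring 0, or either bit clear -> false. */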
2534
2535/**
2536 * Checks if the given segment can be written to, raising the appropriate
2537 * exception if not.
2538 *
2539 * @returns VBox strict status code.
2540 *
2541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2542 * @param pHid Pointer to the hidden register.
2543 * @param iSegReg The register number.
2544 * @param pu64BaseAddr Where to return the base address to use for the
2545 * segment. (In 64-bit code it may differ from the
2546 * base in the hidden segment.)
2547 */
2548DECLINLINE(VBOXSTRICTRC) iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
2549 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
2550{
2551 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2552
2553 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2554 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
2555 else
2556 {
2557 if (!pHid->Attr.n.u1Present)
2558 {
2559 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
2560 AssertRelease(uSel == 0);
2561 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
2562 return iemRaiseGeneralProtectionFault0(pVCpu);
2563 }
2564
2565 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
2566 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
2567 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
2568 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
2569 *pu64BaseAddr = pHid->u64Base;
2570 }
2571 return VINF_SUCCESS;
2572}
2573
2574
2575/**
2576 * Checks if the given segment can be read from, raising the appropriate
2577 * exception if not.
2578 *
2579 * @returns VBox strict status code.
2580 *
2581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2582 * @param pHid Pointer to the hidden register.
2583 * @param iSegReg The register number.
2584 * @param pu64BaseAddr Where to return the base address to use for the
2585 * segment. (In 64-bit code it may differ from the
2586 * base in the hidden segment.)
2587 */
2588DECLINLINE(VBOXSTRICTRC) iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
2589 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
2590{
2591 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2592
2593 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2594 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
2595 else
2596 {
2597 if (!pHid->Attr.n.u1Present)
2598 {
2599 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
2600 AssertRelease(uSel == 0);
2601 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
2602 return iemRaiseGeneralProtectionFault0(pVCpu);
2603 }
2604
2605 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2606 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
2607 *pu64BaseAddr = pHid->u64Base;
2608 }
2609 return VINF_SUCCESS;
2610}
2611
2612
2613/**
2614 * Maps a physical page.
2615 *
2616 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
2617 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2618 * @param GCPhysMem The physical address.
2619 * @param fAccess The intended access.
2620 * @param ppvMem Where to return the mapping address.
2621 * @param pLock The PGM lock.
2622 */
2623DECLINLINE(int) iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
2624 void **ppvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
2625{
2626#ifdef IEM_LOG_MEMORY_WRITES
2627 if (fAccess & IEM_ACCESS_TYPE_WRITE)
2628 return VERR_PGM_PHYS_TLB_CATCH_ALL;
2629#endif
2630
2631    /** @todo This API may require some improvement later.  A private deal with PGM
2632     *        regarding locking and unlocking needs to be struck.  A couple of TLBs
2633 * living in PGM, but with publicly accessible inlined access methods
2634 * could perhaps be an even better solution. */
2635 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
2636 GCPhysMem,
2637 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
2638 pVCpu->iem.s.fBypassHandlers,
2639 ppvMem,
2640 pLock);
2641 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
2642 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
2643
2644 return rc;
2645}
2646
2647
2648/**
2649 * Unmaps a page previously mapped by iemMemPageMap.
2650 *
2651 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2652 * @param GCPhysMem The physical address.
2653 * @param fAccess The intended access.
2654 * @param pvMem What iemMemPageMap returned.
2655 * @param pLock The PGM lock.
2656 */
2657DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
2658 const void *pvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
2659{
2660 NOREF(pVCpu);
2661 NOREF(GCPhysMem);
2662 NOREF(fAccess);
2663 NOREF(pvMem);
2664 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
2665}
2666
2667#ifdef IEM_WITH_SETJMP
2668
2669/** @todo slim this down */
2670DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg,
2671 size_t cbMem, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
2672{
2673 Assert(cbMem >= 1);
2674 Assert(iSegReg < X86_SREG_COUNT);
2675
2676 /*
2677 * 64-bit mode is simpler.
2678 */
2679 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2680 {
2681 if (iSegReg >= X86_SREG_FS && iSegReg != UINT8_MAX)
2682 {
2683 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2684 PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
2685 GCPtrMem += pSel->u64Base;
2686 }
2687
2688 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
2689 return GCPtrMem;
2690 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
2691 }
2692 /*
2693 * 16-bit and 32-bit segmentation.
2694 */
2695 else if (iSegReg != UINT8_MAX)
2696 {
2697 /** @todo Does this apply to segments with 4G-1 limit? */
2698 uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
2699 if (RT_LIKELY(GCPtrLast32 >= (uint32_t)GCPtrMem))
2700 {
2701 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2702 PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
2703 switch (pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
2704 | X86_SEL_TYPE_READ | X86_SEL_TYPE_WRITE /* same as read */
2705 | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_CONF /* same as down */
2706 | X86_SEL_TYPE_CODE))
2707 {
2708 case X86DESCATTR_P: /* readonly data, expand up */
2709 case X86DESCATTR_P | X86_SEL_TYPE_WRITE: /* writable data, expand up */
2710 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ: /* code, read-only */
2711 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_CONF: /* conforming code, read-only */
2712 /* expand up */
2713 if (RT_LIKELY(GCPtrLast32 <= pSel->u32Limit))
2714 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
2715 Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x vs %#x\n",
2716 (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit));
2717 break;
2718
2719 case X86DESCATTR_P | X86_SEL_TYPE_DOWN: /* readonly data, expand down */
2720 case X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_WRITE: /* writable data, expand down */
2721 /* expand down */
2722 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
2723 && ( pSel->Attr.n.u1DefBig
2724 || GCPtrLast32 <= UINT32_C(0xffff)) ))
2725 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
2726 Log10(("iemMemApplySegmentToReadJmp: expand down out of bounds %#x..%#x vs %#x..%#x\n",
2727 (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit, pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT16_MAX));
2728 break;
2729
2730 default:
2731 Log10(("iemMemApplySegmentToReadJmp: bad selector %#x\n", pSel->Attr.u));
2732 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
2733 break;
2734 }
2735 }
2736 Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x\n",(uint32_t)GCPtrMem, GCPtrLast32));
2737 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
2738 }
2739 /*
2740 * 32-bit flat address.
2741 */
2742 else
2743 return GCPtrMem;
2744}
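/* For illustration of the expand-down case above: a 32-bit (D=1) expand-down data
   segment with limit 0x1000 accepts offsets 0x1001..0xffffffff, so a 4 byte read at
   offset 0x0fff fails the limit check while one at 0x2000 passes it. */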
2745
2746
2747/** @todo slim this down */
2748DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem,
2749 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
2750{
2751 Assert(cbMem >= 1);
2752 Assert(iSegReg < X86_SREG_COUNT);
2753
2754 /*
2755 * 64-bit mode is simpler.
2756 */
2757 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2758 {
2759 if (iSegReg >= X86_SREG_FS)
2760 {
2761 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2762 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
2763 GCPtrMem += pSel->u64Base;
2764 }
2765
2766 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
2767 return GCPtrMem;
2768 }
2769 /*
2770 * 16-bit and 32-bit segmentation.
2771 */
2772 else
2773 {
2774 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
2775 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
2776 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
2777 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
2778 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
2779 {
2780 /* expand up */
2781 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
2782 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
2783 && GCPtrLast32 > (uint32_t)GCPtrMem))
2784 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
2785 }
2786        else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
2787 {
2788 /* expand down */
2789 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
2790 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
2791 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2792 && GCPtrLast32 > (uint32_t)GCPtrMem))
2793 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
2794 }
2795 else
2796 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
2797 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
2798 }
2799 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
2800}
2801
2802#endif /* IEM_WITH_SETJMP */
2803
2804/**
2805 * Fakes a long mode stack segment descriptor for SS = 0.
2806 *
2807 * @param pDescSs Where to return the fake stack descriptor.
2808 * @param uDpl The DPL we want.
2809 */
2810DECLINLINE(void) iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl) RT_NOEXCEPT
2811{
2812 pDescSs->Long.au64[0] = 0;
2813 pDescSs->Long.au64[1] = 0;
2814 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2815 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
2816 pDescSs->Long.Gen.u2Dpl = uDpl;
2817 pDescSs->Long.Gen.u1Present = 1;
2818 pDescSs->Long.Gen.u1Long = 1;
2819}
2820
2821/** @} */
2822
2823
2824#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2825
2826/**
2827 * Gets CR0 fixed-0 bits in VMX non-root mode.
2828 *
2829 * We do this rather than fetching what we report to the guest (in the
2830 * IA32_VMX_CR0_FIXED0 MSR) because real hardware (and we do the same) reports the
2831 * same values regardless of whether the unrestricted-guest feature is available on the CPU.
2832 *
2833 * @returns CR0 fixed-0 bits.
2834 * @param pVCpu The cross context virtual CPU structure.
2835 */
2836DECLINLINE(uint64_t) iemVmxGetCr0Fixed0(PCVMCPUCC pVCpu) RT_NOEXCEPT
2837{
2838 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
2839 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
2840
2841 static uint64_t const s_auCr0Fixed0[2] = { VMX_V_CR0_FIXED0, VMX_V_CR0_FIXED0_UX };
2842 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
2843 uint8_t const fUnrestrictedGuest = !!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
2844 uint64_t const uCr0Fixed0 = s_auCr0Fixed0[fUnrestrictedGuest];
2845 Assert(!(uCr0Fixed0 & (X86_CR0_NW | X86_CR0_CD)));
2846 return uCr0Fixed0;
2847}
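/* Note: with unrestricted guest enabled the fixed-0 set is relaxed so that CR0.PE and
   CR0.PG may be zero; only the remaining mandatory bits (e.g. NE) stay fixed to 1.
   (NW and CD are asserted above not to be part of the set.) */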
2848
2849
2850/**
2851 * Sets virtual-APIC write emulation as pending.
2852 *
2853 * @param pVCpu The cross context virtual CPU structure.
2854 * @param offApic The offset in the virtual-APIC page that was written.
2855 */
2856DECLINLINE(void) iemVmxVirtApicSetPendingWrite(PVMCPUCC pVCpu, uint16_t offApic) RT_NOEXCEPT
2857{
2858 Assert(offApic < XAPIC_OFF_END + 4);
2859
2860 /*
2861 * Record the currently updated APIC offset, as we need this later for figuring
2862     * out whether to perform TPR, EOI or self-IPI virtualization as well
2863 * as for supplying the exit qualification when causing an APIC-write VM-exit.
2864 */
2865 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offApic;
2866
2867 /*
2868 * Flag that we need to perform virtual-APIC write emulation (TPR/PPR/EOI/Self-IPI
2869 * virtualization or APIC-write emulation).
2870 */
2871 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
2872 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
2873}
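/* Example: a guest write to the virtual TPR at offset 0x80 of the virtual-APIC page
   records offVirtApicWrite=0x80 and raises VMCPU_FF_VMX_APIC_WRITE, so the write
   emulation (or an APIC-write VM-exit) is performed before resuming the guest. */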
2874
2875#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
2876
2877#endif /* !VMM_INCLUDED_SRC_include_IEMInline_h */