VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@72262

Last change on this file since 72262 was 72251, checked in by vboxsync on 2018-05-17

IEMAll: Must roll back memory changes after failed instruction fetch and exception injection too. The latter may fail non-fatally in ring-0 and raw-mode.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 641.9 KB
1/* $Id: IEMAll.cpp 72251 2018-05-17 17:39:01Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there until we
53 * leave IEM, because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
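/*
 * Editor's illustrative sketch (not part of the original file): how the log
 * levels listed above map onto the Log* macros from VBox/log.h.  The function
 * name and the messages are made up for the example.
 */
#if 0 /* example only */
static void iemExampleLogUsage(RTGCPTR GCPtrMem, uint32_t cbMem)
{
    LogFlow(("IEMExecOne: enter\n"));                   /* Flow    - basic enter/exit state info. */
    Log(("iemRaiseXcptOrInt: vec=%#x\n", 13));          /* Level 1 - exceptions, interrupts & co. */
    Log4(("decode: nop\n"));                            /* Level 4 - decoded mnemonics.           */
    Log8(("IEM WR %RGv LB %#x\n", GCPtrMem, cbMem));    /* Level 8 - memory writes.               */
}
#endif
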
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
105# include <VBox/vmm/em.h>
106# include <VBox/vmm/hm_svm.h>
107#endif
108#include <VBox/vmm/tm.h>
109#include <VBox/vmm/dbgf.h>
110#include <VBox/vmm/dbgftrace.h>
111#ifdef VBOX_WITH_RAW_MODE_NOT_R0
112# include <VBox/vmm/patm.h>
113# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
114# include <VBox/vmm/csam.h>
115# endif
116#endif
117#include "IEMInternal.h"
118#ifdef IEM_VERIFICATION_MODE_FULL
119# include <VBox/vmm/rem.h>
120# include <VBox/vmm/mm.h>
121#endif
122#include <VBox/vmm/vm.h>
123#include <VBox/log.h>
124#include <VBox/err.h>
125#include <VBox/param.h>
126#include <VBox/dis.h>
127#include <VBox/disopcode.h>
128#include <iprt/assert.h>
129#include <iprt/string.h>
130#include <iprt/x86.h>
131
132
133/*********************************************************************************************************************************
134* Structures and Typedefs *
135*********************************************************************************************************************************/
136/** @typedef PFNIEMOP
137 * Pointer to an opcode decoder function.
138 */
139
140/** @def FNIEMOP_DEF
141 * Define an opcode decoder function.
142 *
143 * We're using macros for this so that adding and removing parameters as well as
144 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL
145 *
146 * @param a_Name The function name.
147 */
148
149/** @typedef PFNIEMOPRM
150 * Pointer to an opcode decoder function with RM byte.
151 */
152
153/** @def FNIEMOPRM_DEF
154 * Define an opcode decoder function with RM byte.
155 *
156 * We're using macros for this so that adding and removing parameters as well as
157 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1
158 *
159 * @param a_Name The function name.
160 */
161
162#if defined(__GNUC__) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
164typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
171
172#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
174typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
181
182#elif defined(__GNUC__)
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
191
192#else
193typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
194typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
195# define FNIEMOP_DEF(a_Name) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
197# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
198 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
199# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
200 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
201
202#endif
203#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
204
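/*
 * Editor's illustrative sketch (not part of the original file): roughly what a
 * decoder function declared with the FNIEMOP_DEF macro above looks like.  The
 * function name and body are made up for the example.
 */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_ExampleNop)
{
    /* A real decoder would consume further opcode bytes / the ModR/M byte here
       and dispatch to a C implementation worker. */
    RT_NOREF(pVCpu);
    return VINF_SUCCESS;
}
#endif
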
205
206/**
207 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
208 */
209typedef union IEMSELDESC
210{
211 /** The legacy view. */
212 X86DESC Legacy;
213 /** The long mode view. */
214 X86DESC64 Long;
215} IEMSELDESC;
216/** Pointer to a selector descriptor table entry. */
217typedef IEMSELDESC *PIEMSELDESC;
218
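/*
 * Editor's usage sketch (not part of the original file): callers typically
 * fetch a descriptor with iemMemFetchSelDesc() (declared further down) and
 * then inspect the Legacy view; the Long view only matters for the 16-byte
 * system descriptors in long mode.  The helper name is made up.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleFetchDescDpl(PVMCPU pVCpu, uint16_t uSel, uint8_t *puDpl)
{
    IEMSELDESC Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
    if (rcStrict == VINF_SUCCESS)
        *puDpl = Desc.Legacy.Gen.u2Dpl;     /* the legacy view covers the common cases */
    return rcStrict;
}
#endif
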
219/**
220 * CPU exception classes.
221 */
222typedef enum IEMXCPTCLASS
223{
224 IEMXCPTCLASS_BENIGN,
225 IEMXCPTCLASS_CONTRIBUTORY,
226 IEMXCPTCLASS_PAGE_FAULT,
227 IEMXCPTCLASS_DOUBLE_FAULT
228} IEMXCPTCLASS;
229
230
231/*********************************************************************************************************************************
232* Defined Constants And Macros *
233*********************************************************************************************************************************/
234/** @def IEM_WITH_SETJMP
235 * Enables alternative status code handling using setjmps.
236 *
237 * This adds a bit of expense via the setjmp() call since it saves all the
238 * non-volatile registers. However, it eliminates return code checks and allows
239 * for more optimal return value passing (return regs instead of stack buffer).
240 */
241#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
242# define IEM_WITH_SETJMP
243#endif
244
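/*
 * Editor's conceptual sketch (not the actual IEM implementation): the idea
 * behind IEM_WITH_SETJMP is that the outer executor arms a jump buffer once
 * and failing helpers longjmp straight back with the status code, instead of
 * every helper returning a VBOXSTRICTRC that each call site must check.
 */
#if 0 /* example only */
# include <setjmp.h>
static jmp_buf g_ExampleJmpBuf;  /* IEM keeps such a buffer per call frame/VCpu; a global is just for the sketch. */

static void exampleRaiseFault(int rcStatus)
{
    longjmp(g_ExampleJmpBuf, rcStatus);     /* does not return */
}

static int exampleExecuteOne(void)
{
    int rcStatus = setjmp(g_ExampleJmpBuf);
    if (rcStatus == 0)
    {
        /* ... decode and execute; helpers call exampleRaiseFault() on failure ... */
        return 0;                           /* the VINF_SUCCESS case */
    }
    return rcStatus;                        /* status code passed to longjmp */
}
#endif
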
245/** Temporary hack to disable the double execution. Will be removed in favor
246 * of a dedicated execution mode in EM. */
247//#define IEM_VERIFICATION_MODE_NO_REM
248
249/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
250 * due to GCC lacking knowledge about the value range of a switch. */
251#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
252
253/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
254#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
255
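/*
 * Editor's usage sketch (not part of the original file): the macro supplies
 * the 'default:' arm of a switch that already covers every legal value, so
 * GCC stops warning about possibly uninitialized variables.  The function
 * and its values are made up for the example.
 */
#if 0 /* example only */
static uint32_t iemExampleOperandMask(IEMMODE enmEffOpSize)
{
    uint32_t fMask;
    switch (enmEffOpSize)
    {
        case IEMMODE_16BIT: fMask = UINT16_MAX; break;
        case IEMMODE_32BIT: fMask = UINT32_MAX; break;
        case IEMMODE_64BIT: fMask = UINT32_MAX; break;  /* truncated on purpose, it's only a sketch */
        IEM_NOT_REACHED_DEFAULT_CASE_RET2(0);
    }
    return fMask;
}
#endif
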
256/**
257 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
258 * occasion.
259 */
260#ifdef LOG_ENABLED
261# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
262 do { \
263 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
264 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
265 } while (0)
266#else
267# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
269#endif
270
271/**
272 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
273 * occasion using the supplied logger statement.
274 *
275 * @param a_LoggerArgs What to log on failure.
276 */
277#ifdef LOG_ENABLED
278# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
279 do { \
280 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
281 /*LogFunc(a_LoggerArgs);*/ \
282 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
283 } while (0)
284#else
285# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
286 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
287#endif
288
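/*
 * Editor's usage sketch (not part of the original file): decoder paths IEM
 * cannot handle yet typically bail out like this, leaving the caller to fall
 * back on another execution engine.  The function name is made up.
 */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_ExampleUnimplemented)
{
    if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
        IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("example: 64-bit form not implemented\n"));
    return VINF_SUCCESS;
}
#endif
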
289/**
290 * Call an opcode decoder function.
291 *
292 * We're using macros for this so that adding and removing parameters can be
293 * done as we please. See FNIEMOP_DEF.
294 */
295#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
296
297/**
298 * Call a common opcode decoder function taking one extra argument.
299 *
300 * We're using macros for this so that adding and removing parameters can be
301 * done as we please. See FNIEMOP_DEF_1.
302 */
303#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
304
305/**
306 * Call a common opcode decoder function taking two extra arguments.
307 *
308 * We're using macros for this so that adding and removing parameters can be
309 * done as we please. See FNIEMOP_DEF_1.
310 */
311#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
312
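/*
 * Editor's usage sketch (not part of the original file): dispatching on the
 * first opcode byte through the one-byte map (declared further down) using
 * the FNIEMOP_CALL macro above.  The function name is made up.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleDispatchOneByte(PVMCPU pVCpu, uint8_t bOpcode)
{
    PFNIEMOP const pfnOp = g_apfnOneByteMap[bOpcode];
    return FNIEMOP_CALL(pfnOp);         /* expands to pfnOp(pVCpu) */
}
#endif
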
313/**
314 * Check if we're currently executing in real or virtual 8086 mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The IEM state of the current CPU.
318 */
319#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Check if we're currently executing in virtual 8086 mode.
323 *
324 * @returns @c true if it is, @c false if not.
325 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
326 */
327#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
328
329/**
330 * Check if we're currently executing in long mode.
331 *
332 * @returns @c true if it is, @c false if not.
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
336
337/**
338 * Check if we're currently executing in real mode.
339 *
340 * @returns @c true if it is, @c false if not.
341 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
342 */
343#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
344
345/**
346 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
347 * @returns PCCPUMFEATURES
348 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
349 */
350#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
351
352/**
353 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
354 * @returns PCCPUMFEATURES
355 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
356 */
357#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
358
359/**
360 * Evaluates to true if we're presenting an Intel CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
363
364/**
365 * Evaluates to true if we're presenting an AMD CPU to the guest.
366 */
367#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
368
369/**
370 * Check if the address is canonical.
371 */
372#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
373
374/**
375 * Gets the effective VEX.VVVV value.
376 *
377 * The 4th bit is ignored when not executing 64-bit code.
378 * @returns effective V-register value.
379 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
380 */
381#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
382 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
383
384/** @def IEM_USE_UNALIGNED_DATA_ACCESS
385 * Use unaligned accesses instead of elaborate byte assembly. */
386#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
387# define IEM_USE_UNALIGNED_DATA_ACCESS
388#endif
389
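/*
 * Editor's illustrative sketch (not part of the original file) of what
 * IEM_USE_UNALIGNED_DATA_ACCESS toggles: on x86 and AMD64 hosts a guest dword
 * can simply be loaded through an unaligned pointer, while other hosts would
 * assemble it byte by byte.  The function name is made up.
 */
#if 0 /* example only */
static uint32_t iemExampleFetchU32(uint8_t const *pbSrc)
{
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    return *(uint32_t const *)pbSrc;        /* the host tolerates unaligned loads */
# else
    return (uint32_t)pbSrc[0]               /* elaborate byte assembly */
         | ((uint32_t)pbSrc[1] <<  8)
         | ((uint32_t)pbSrc[2] << 16)
         | ((uint32_t)pbSrc[3] << 24);
# endif
}
#endif
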
390#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
391/**
392 * Check the common SVM instruction preconditions.
393 */
394# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
395 do { \
396 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
397 { \
398 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
399 return iemRaiseUndefinedOpcode(pVCpu); \
400 } \
401 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
402 { \
403 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
404 return iemRaiseUndefinedOpcode(pVCpu); \
405 } \
406 if (pVCpu->iem.s.uCpl != 0) \
407 { \
408 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
409 return iemRaiseGeneralProtectionFault0(pVCpu); \
410 } \
411 } while (0)
412
413/**
414 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
415 */
416# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
417 do { \
418 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
419 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
420 } while (0)
421
422/**
423 * Check if SVM is enabled.
424 */
425# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
426
427/**
428 * Check if an SVM control/instruction intercept is set.
429 */
430# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
431
432/**
433 * Check if an SVM read CRx intercept is set.
434 */
435# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
436
437/**
438 * Check if an SVM write CRx intercept is set.
439 */
440# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
441
442/**
443 * Check if an SVM read DRx intercept is set.
444 */
445# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
446
447/**
448 * Check if an SVM write DRx intercept is set.
449 */
450# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
451
452/**
453 * Check if an SVM exception intercept is set.
454 */
455# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
456
457/**
458 * Get the SVM pause-filter count.
459 */
460# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu) (CPUMGetGuestSvmPauseFilterCount(a_pVCpu, IEM_GET_CTX(a_pVCpu)))
461
462/**
463 * Invokes the SVM \#VMEXIT handler for the nested-guest.
464 */
465# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
466 do \
467 { \
468 return iemSvmVmexit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); \
469 } while (0)
470
471/**
472 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
473 * corresponding decode assist information.
474 */
475# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
476 do \
477 { \
478 uint64_t uExitInfo1; \
479 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
480 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
481 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
482 else \
483 uExitInfo1 = 0; \
484 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
485 } while (0)
486
487#else
488# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
489# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
490# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
491# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
492# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
493# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
494# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
495# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
496# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
497# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu) (0)
498# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
499# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
500
501#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
502
503
504/*********************************************************************************************************************************
505* Global Variables *
506*********************************************************************************************************************************/
507extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
508
509
510/** Function table for the ADD instruction. */
511IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
512{
513 iemAImpl_add_u8, iemAImpl_add_u8_locked,
514 iemAImpl_add_u16, iemAImpl_add_u16_locked,
515 iemAImpl_add_u32, iemAImpl_add_u32_locked,
516 iemAImpl_add_u64, iemAImpl_add_u64_locked
517};
518
519/** Function table for the ADC instruction. */
520IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
521{
522 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
523 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
524 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
525 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
526};
527
528/** Function table for the SUB instruction. */
529IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
530{
531 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
532 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
533 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
534 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
535};
536
537/** Function table for the SBB instruction. */
538IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
539{
540 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
541 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
542 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
543 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
544};
545
546/** Function table for the OR instruction. */
547IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
548{
549 iemAImpl_or_u8, iemAImpl_or_u8_locked,
550 iemAImpl_or_u16, iemAImpl_or_u16_locked,
551 iemAImpl_or_u32, iemAImpl_or_u32_locked,
552 iemAImpl_or_u64, iemAImpl_or_u64_locked
553};
554
555/** Function table for the XOR instruction. */
556IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
557{
558 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
559 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
560 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
561 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
562};
563
564/** Function table for the AND instruction. */
565IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
566{
567 iemAImpl_and_u8, iemAImpl_and_u8_locked,
568 iemAImpl_and_u16, iemAImpl_and_u16_locked,
569 iemAImpl_and_u32, iemAImpl_and_u32_locked,
570 iemAImpl_and_u64, iemAImpl_and_u64_locked
571};
572
573/** Function table for the CMP instruction.
574 * @remarks Making operand order ASSUMPTIONS.
575 */
576IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
577{
578 iemAImpl_cmp_u8, NULL,
579 iemAImpl_cmp_u16, NULL,
580 iemAImpl_cmp_u32, NULL,
581 iemAImpl_cmp_u64, NULL
582};
583
584/** Function table for the TEST instruction.
585 * @remarks Making operand order ASSUMPTIONS.
586 */
587IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
588{
589 iemAImpl_test_u8, NULL,
590 iemAImpl_test_u16, NULL,
591 iemAImpl_test_u32, NULL,
592 iemAImpl_test_u64, NULL
593};
594
595/** Function table for the BT instruction. */
596IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
597{
598 NULL, NULL,
599 iemAImpl_bt_u16, NULL,
600 iemAImpl_bt_u32, NULL,
601 iemAImpl_bt_u64, NULL
602};
603
604/** Function table for the BTC instruction. */
605IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
606{
607 NULL, NULL,
608 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
609 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
610 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
611};
612
613/** Function table for the BTR instruction. */
614IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
615{
616 NULL, NULL,
617 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
618 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
619 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
620};
621
622/** Function table for the BTS instruction. */
623IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
624{
625 NULL, NULL,
626 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
627 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
628 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
629};
630
631/** Function table for the BSF instruction. */
632IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
633{
634 NULL, NULL,
635 iemAImpl_bsf_u16, NULL,
636 iemAImpl_bsf_u32, NULL,
637 iemAImpl_bsf_u64, NULL
638};
639
640/** Function table for the BSR instruction. */
641IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
642{
643 NULL, NULL,
644 iemAImpl_bsr_u16, NULL,
645 iemAImpl_bsr_u32, NULL,
646 iemAImpl_bsr_u64, NULL
647};
648
649/** Function table for the IMUL instruction. */
650IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
651{
652 NULL, NULL,
653 iemAImpl_imul_two_u16, NULL,
654 iemAImpl_imul_two_u32, NULL,
655 iemAImpl_imul_two_u64, NULL
656};
657
658/** Group 1 /r lookup table. */
659IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
660{
661 &g_iemAImpl_add,
662 &g_iemAImpl_or,
663 &g_iemAImpl_adc,
664 &g_iemAImpl_sbb,
665 &g_iemAImpl_and,
666 &g_iemAImpl_sub,
667 &g_iemAImpl_xor,
668 &g_iemAImpl_cmp
669};
670
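/*
 * Editor's usage sketch (not part of the original file): the group-1
 * immediate instructions (opcodes 0x80..0x83) select their worker table from
 * g_apIemImplGrp1 using the reg field of the ModR/M byte.  The function name
 * is made up.
 */
#if 0 /* example only */
static PCIEMOPBINSIZES iemExampleLookupGrp1(uint8_t bRm)
{
    return g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
}
#endif
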
671/** Function table for the INC instruction. */
672IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
673{
674 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
675 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
676 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
677 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
678};
679
680/** Function table for the DEC instruction. */
681IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
682{
683 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
684 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
685 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
686 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
687};
688
689/** Function table for the NEG instruction. */
690IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
691{
692 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
693 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
694 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
695 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
696};
697
698/** Function table for the NOT instruction. */
699IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
700{
701 iemAImpl_not_u8, iemAImpl_not_u8_locked,
702 iemAImpl_not_u16, iemAImpl_not_u16_locked,
703 iemAImpl_not_u32, iemAImpl_not_u32_locked,
704 iemAImpl_not_u64, iemAImpl_not_u64_locked
705};
706
707
708/** Function table for the ROL instruction. */
709IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
710{
711 iemAImpl_rol_u8,
712 iemAImpl_rol_u16,
713 iemAImpl_rol_u32,
714 iemAImpl_rol_u64
715};
716
717/** Function table for the ROR instruction. */
718IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
719{
720 iemAImpl_ror_u8,
721 iemAImpl_ror_u16,
722 iemAImpl_ror_u32,
723 iemAImpl_ror_u64
724};
725
726/** Function table for the RCL instruction. */
727IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
728{
729 iemAImpl_rcl_u8,
730 iemAImpl_rcl_u16,
731 iemAImpl_rcl_u32,
732 iemAImpl_rcl_u64
733};
734
735/** Function table for the RCR instruction. */
736IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
737{
738 iemAImpl_rcr_u8,
739 iemAImpl_rcr_u16,
740 iemAImpl_rcr_u32,
741 iemAImpl_rcr_u64
742};
743
744/** Function table for the SHL instruction. */
745IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
746{
747 iemAImpl_shl_u8,
748 iemAImpl_shl_u16,
749 iemAImpl_shl_u32,
750 iemAImpl_shl_u64
751};
752
753/** Function table for the SHR instruction. */
754IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
755{
756 iemAImpl_shr_u8,
757 iemAImpl_shr_u16,
758 iemAImpl_shr_u32,
759 iemAImpl_shr_u64
760};
761
762/** Function table for the SAR instruction. */
763IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
764{
765 iemAImpl_sar_u8,
766 iemAImpl_sar_u16,
767 iemAImpl_sar_u32,
768 iemAImpl_sar_u64
769};
770
771
772/** Function table for the MUL instruction. */
773IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
774{
775 iemAImpl_mul_u8,
776 iemAImpl_mul_u16,
777 iemAImpl_mul_u32,
778 iemAImpl_mul_u64
779};
780
781/** Function table for the IMUL instruction working implicitly on rAX. */
782IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
783{
784 iemAImpl_imul_u8,
785 iemAImpl_imul_u16,
786 iemAImpl_imul_u32,
787 iemAImpl_imul_u64
788};
789
790/** Function table for the DIV instruction. */
791IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
792{
793 iemAImpl_div_u8,
794 iemAImpl_div_u16,
795 iemAImpl_div_u32,
796 iemAImpl_div_u64
797};
798
799/** Function table for the IDIV instruction. */
800IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
801{
802 iemAImpl_idiv_u8,
803 iemAImpl_idiv_u16,
804 iemAImpl_idiv_u32,
805 iemAImpl_idiv_u64
806};
807
808/** Function table for the SHLD instruction */
809IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
810{
811 iemAImpl_shld_u16,
812 iemAImpl_shld_u32,
813 iemAImpl_shld_u64,
814};
815
816/** Function table for the SHRD instruction */
817IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
818{
819 iemAImpl_shrd_u16,
820 iemAImpl_shrd_u32,
821 iemAImpl_shrd_u64,
822};
823
824
825/** Function table for the PUNPCKLBW instruction */
826IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
827/** Function table for the PUNPCKLWD instruction */
828IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
829/** Function table for the PUNPCKLDQ instruction */
830IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
831/** Function table for the PUNPCKLQDQ instruction */
832IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
833
834/** Function table for the PUNPCKHBW instruction */
835IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
836/** Function table for the PUNPCKHWD instruction */
837IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
838/** Function table for the PUNPCKHDQ instruction */
839IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
840/** Function table for the PUNPCKHQDQ instruction */
841IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
842
843/** Function table for the PXOR instruction */
844IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
845/** Function table for the PCMPEQB instruction */
846IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
847/** Function table for the PCMPEQW instruction */
848IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
849/** Function table for the PCMPEQD instruction */
850IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
851
852
853#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
854/** What IEM just wrote. */
855uint8_t g_abIemWrote[256];
856/** How much IEM just wrote. */
857size_t g_cbIemWrote;
858#endif
859
860
861/*********************************************************************************************************************************
862* Internal Functions *
863*********************************************************************************************************************************/
864IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
865IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
866IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
867IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
868/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
869IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
870IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
871IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
872IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
873IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
874IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
875IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
876IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
877IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
878IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
879IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
880IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
881#ifdef IEM_WITH_SETJMP
882DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
883DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
884DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
885DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
886DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
887#endif
888
889IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
890IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
891IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
892IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
893IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
894IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
895IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
896IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
897IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
898IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
899IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
900IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
901IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
902IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
903IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
904IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
905IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
906
907#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
908IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
909#endif
910IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
911IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
912
913#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
914IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
915 uint64_t uExitInfo2);
916IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
917 uint32_t uErr, uint64_t uCr2);
918#endif
919
920/**
921 * Sets the pass up status.
922 *
923 * @returns VINF_SUCCESS.
924 * @param pVCpu The cross context virtual CPU structure of the
925 * calling thread.
926 * @param rcPassUp The pass up status. Must be informational.
927 * VINF_SUCCESS is not allowed.
928 */
929IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
930{
931 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
932
933 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
934 if (rcOldPassUp == VINF_SUCCESS)
935 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
936 /* If both are EM scheduling codes, use EM priority rules. */
937 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
938 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
939 {
940 if (rcPassUp < rcOldPassUp)
941 {
942 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
943 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
944 }
945 else
946 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
947 }
948 /* Override EM scheduling with specific status code. */
949 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
950 {
951 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
952 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
953 }
954 /* Don't override specific status code, first come first served. */
955 else
956 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
957 return VINF_SUCCESS;
958}
959
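/*
 * Editor's usage sketch (not part of the original file), mirroring the
 * pattern used by the opcode prefetch code further down: an informational
 * PGM status that still means the read completed is recorded as the pass-up
 * status, and the caller carries on as if it got VINF_SUCCESS.  The helper
 * name is made up.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleReadPhys(PVMCPU pVCpu, RTGCPHYS GCPhys, void *pvBuf, size_t cbBuf)
{
    VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pvBuf, cbBuf, PGMACCESSORIGIN_IEM);
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        return VINF_SUCCESS;
    if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
        return iemSetPassUpStatus(pVCpu, rcStrict);     /* informational, but the read did complete */
    return rcStrict;                                    /* real error - propagate */
}
#endif
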
960
961/**
962 * Calculates the CPU mode.
963 *
964 * This is mainly for updating IEMCPU::enmCpuMode.
965 *
966 * @returns CPU mode.
967 * @param pCtx The register context for the CPU.
968 */
969DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
970{
971 if (CPUMIsGuestIn64BitCodeEx(pCtx))
972 return IEMMODE_64BIT;
973 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
974 return IEMMODE_32BIT;
975 return IEMMODE_16BIT;
976}
977
978
979/**
980 * Initializes the execution state.
981 *
982 * @param pVCpu The cross context virtual CPU structure of the
983 * calling thread.
984 * @param fBypassHandlers Whether to bypass access handlers.
985 *
986 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
987 * side-effects in strict builds.
988 */
989DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
990{
991 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
992
993 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
994
995#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
996 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
997 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
998 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
999 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1000 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1001 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1002 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1003 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1004#endif
1005
1006#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1007 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1008#endif
1009 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1010 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1011#ifdef VBOX_STRICT
1012 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1013 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1014 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1015 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1016 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1017 pVCpu->iem.s.uRexReg = 127;
1018 pVCpu->iem.s.uRexB = 127;
1019 pVCpu->iem.s.uRexIndex = 127;
1020 pVCpu->iem.s.iEffSeg = 127;
1021 pVCpu->iem.s.idxPrefix = 127;
1022 pVCpu->iem.s.uVex3rdReg = 127;
1023 pVCpu->iem.s.uVexLength = 127;
1024 pVCpu->iem.s.fEvexStuff = 127;
1025 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1026# ifdef IEM_WITH_CODE_TLB
1027 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1028 pVCpu->iem.s.pbInstrBuf = NULL;
1029 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1030 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1031 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1032 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1033# else
1034 pVCpu->iem.s.offOpcode = 127;
1035 pVCpu->iem.s.cbOpcode = 127;
1036# endif
1037#endif
1038
1039 pVCpu->iem.s.cActiveMappings = 0;
1040 pVCpu->iem.s.iNextMapping = 0;
1041 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1042 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1043#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1044 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1045 && pCtx->cs.u64Base == 0
1046 && pCtx->cs.u32Limit == UINT32_MAX
1047 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1048 if (!pVCpu->iem.s.fInPatchCode)
1049 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1050#endif
1051
1052#ifdef IEM_VERIFICATION_MODE_FULL
1053 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
1054 pVCpu->iem.s.fNoRem = true;
1055#endif
1056}
1057
1058#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1059/**
1060 * Performs a minimal reinitialization of the execution state.
1061 *
1062 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1063 * 'world-switch' type operations on the CPU. Currently only nested
1064 * hardware-virtualization uses it.
1065 *
1066 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1067 */
1068IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1069{
1070 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1071 IEMMODE const enmMode = iemCalcCpuMode(pCtx);
1072 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1073
1074 pVCpu->iem.s.uCpl = uCpl;
1075 pVCpu->iem.s.enmCpuMode = enmMode;
1076 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1077 pVCpu->iem.s.enmEffAddrMode = enmMode;
1078 if (enmMode != IEMMODE_64BIT)
1079 {
1080 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1081 pVCpu->iem.s.enmEffOpSize = enmMode;
1082 }
1083 else
1084 {
1085 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1086 pVCpu->iem.s.enmEffOpSize = enmMode;
1087 }
1088 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1089#ifndef IEM_WITH_CODE_TLB
1090 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1091 pVCpu->iem.s.offOpcode = 0;
1092 pVCpu->iem.s.cbOpcode = 0;
1093#endif
1094}
1095#endif
1096
1097/**
1098 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1099 *
1100 * @param pVCpu The cross context virtual CPU structure of the
1101 * calling thread.
1102 */
1103DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1104{
1105 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1106#ifdef IEM_VERIFICATION_MODE_FULL
1107 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
1108#endif
1109#ifdef VBOX_STRICT
1110# ifdef IEM_WITH_CODE_TLB
1111 NOREF(pVCpu);
1112# else
1113 pVCpu->iem.s.cbOpcode = 0;
1114# endif
1115#else
1116 NOREF(pVCpu);
1117#endif
1118}
1119
1120
1121/**
1122 * Initializes the decoder state.
1123 *
1124 * iemReInitDecoder is mostly a copy of this function.
1125 *
1126 * @param pVCpu The cross context virtual CPU structure of the
1127 * calling thread.
1128 * @param fBypassHandlers Whether to bypass access handlers.
1129 */
1130DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1131{
1132 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1133
1134 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1135
1136#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1137 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1138 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1139 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1140 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1141 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1142 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1143 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1144 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1145#endif
1146
1147#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1148 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1149#endif
1150 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1151#ifdef IEM_VERIFICATION_MODE_FULL
1152 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1153 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1154#endif
1155 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1156 pVCpu->iem.s.enmCpuMode = enmMode;
1157 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1158 pVCpu->iem.s.enmEffAddrMode = enmMode;
1159 if (enmMode != IEMMODE_64BIT)
1160 {
1161 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1162 pVCpu->iem.s.enmEffOpSize = enmMode;
1163 }
1164 else
1165 {
1166 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1167 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1168 }
1169 pVCpu->iem.s.fPrefixes = 0;
1170 pVCpu->iem.s.uRexReg = 0;
1171 pVCpu->iem.s.uRexB = 0;
1172 pVCpu->iem.s.uRexIndex = 0;
1173 pVCpu->iem.s.idxPrefix = 0;
1174 pVCpu->iem.s.uVex3rdReg = 0;
1175 pVCpu->iem.s.uVexLength = 0;
1176 pVCpu->iem.s.fEvexStuff = 0;
1177 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1178#ifdef IEM_WITH_CODE_TLB
1179 pVCpu->iem.s.pbInstrBuf = NULL;
1180 pVCpu->iem.s.offInstrNextByte = 0;
1181 pVCpu->iem.s.offCurInstrStart = 0;
1182# ifdef VBOX_STRICT
1183 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1184 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1185 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1186# endif
1187#else
1188 pVCpu->iem.s.offOpcode = 0;
1189 pVCpu->iem.s.cbOpcode = 0;
1190#endif
1191 pVCpu->iem.s.cActiveMappings = 0;
1192 pVCpu->iem.s.iNextMapping = 0;
1193 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1194 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1195#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1196 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1197 && pCtx->cs.u64Base == 0
1198 && pCtx->cs.u32Limit == UINT32_MAX
1199 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1200 if (!pVCpu->iem.s.fInPatchCode)
1201 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1202#endif
1203
1204#ifdef DBGFTRACE_ENABLED
1205 switch (enmMode)
1206 {
1207 case IEMMODE_64BIT:
1208 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1209 break;
1210 case IEMMODE_32BIT:
1211 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1212 break;
1213 case IEMMODE_16BIT:
1214 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1215 break;
1216 }
1217#endif
1218}
1219
1220
1221/**
1222 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1223 *
1224 * This is mostly a copy of iemInitDecoder.
1225 *
1226 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1227 */
1228DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1229{
1230 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1231
1232 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1233
1234#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1235 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1236 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1237 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1238 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1239 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1240 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1241 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1242 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1243#endif
1244
1245 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1246#ifdef IEM_VERIFICATION_MODE_FULL
1247 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1248 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1249#endif
1250 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1251 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1252 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1253 pVCpu->iem.s.enmEffAddrMode = enmMode;
1254 if (enmMode != IEMMODE_64BIT)
1255 {
1256 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1257 pVCpu->iem.s.enmEffOpSize = enmMode;
1258 }
1259 else
1260 {
1261 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1262 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1263 }
1264 pVCpu->iem.s.fPrefixes = 0;
1265 pVCpu->iem.s.uRexReg = 0;
1266 pVCpu->iem.s.uRexB = 0;
1267 pVCpu->iem.s.uRexIndex = 0;
1268 pVCpu->iem.s.idxPrefix = 0;
1269 pVCpu->iem.s.uVex3rdReg = 0;
1270 pVCpu->iem.s.uVexLength = 0;
1271 pVCpu->iem.s.fEvexStuff = 0;
1272 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1273#ifdef IEM_WITH_CODE_TLB
1274 if (pVCpu->iem.s.pbInstrBuf)
1275 {
1276 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1277 - pVCpu->iem.s.uInstrBufPc;
1278 if (off < pVCpu->iem.s.cbInstrBufTotal)
1279 {
1280 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1281 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1282 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1283 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1284 else
1285 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1286 }
1287 else
1288 {
1289 pVCpu->iem.s.pbInstrBuf = NULL;
1290 pVCpu->iem.s.offInstrNextByte = 0;
1291 pVCpu->iem.s.offCurInstrStart = 0;
1292 pVCpu->iem.s.cbInstrBuf = 0;
1293 pVCpu->iem.s.cbInstrBufTotal = 0;
1294 }
1295 }
1296 else
1297 {
1298 pVCpu->iem.s.offInstrNextByte = 0;
1299 pVCpu->iem.s.offCurInstrStart = 0;
1300 pVCpu->iem.s.cbInstrBuf = 0;
1301 pVCpu->iem.s.cbInstrBufTotal = 0;
1302 }
1303#else
1304 pVCpu->iem.s.cbOpcode = 0;
1305 pVCpu->iem.s.offOpcode = 0;
1306#endif
1307 Assert(pVCpu->iem.s.cActiveMappings == 0);
1308 pVCpu->iem.s.iNextMapping = 0;
1309 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1310 Assert(pVCpu->iem.s.fBypassHandlers == false);
1311#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1312 if (!pVCpu->iem.s.fInPatchCode)
1313 { /* likely */ }
1314 else
1315 {
1316 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1317 && pCtx->cs.u64Base == 0
1318 && pCtx->cs.u32Limit == UINT32_MAX
1319 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1320 if (!pVCpu->iem.s.fInPatchCode)
1321 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1322 }
1323#endif
1324
1325#ifdef DBGFTRACE_ENABLED
1326 switch (enmMode)
1327 {
1328 case IEMMODE_64BIT:
1329 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1330 break;
1331 case IEMMODE_32BIT:
1332 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1333 break;
1334 case IEMMODE_16BIT:
1335 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1336 break;
1337 }
1338#endif
1339}
1340
1341
1342
1343/**
1344 * Prefetch opcodes the first time when starting execution.
1345 *
1346 * @returns Strict VBox status code.
1347 * @param pVCpu The cross context virtual CPU structure of the
1348 * calling thread.
1349 * @param fBypassHandlers Whether to bypass access handlers.
1350 */
1351IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1352{
1353#ifdef IEM_VERIFICATION_MODE_FULL
1354 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1355#endif
1356 iemInitDecoder(pVCpu, fBypassHandlers);
1357
1358#ifdef IEM_WITH_CODE_TLB
1359 /** @todo Do ITLB lookup here. */
1360
1361#else /* !IEM_WITH_CODE_TLB */
1362
1363 /*
1364 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1365 *
1366 * First translate CS:rIP to a physical address.
1367 */
1368 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1369 uint32_t cbToTryRead;
1370 RTGCPTR GCPtrPC;
1371 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1372 {
1373 cbToTryRead = PAGE_SIZE;
1374 GCPtrPC = pCtx->rip;
1375 if (IEM_IS_CANONICAL(GCPtrPC))
1376 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1377 else
1378 return iemRaiseGeneralProtectionFault0(pVCpu);
1379 }
1380 else
1381 {
1382 uint32_t GCPtrPC32 = pCtx->eip;
1383 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1384 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1385 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1386 else
1387 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1388 if (cbToTryRead) { /* likely */ }
1389 else /* overflowed */
1390 {
1391 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1392 cbToTryRead = UINT32_MAX;
1393 }
1394 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1395 Assert(GCPtrPC <= UINT32_MAX);
1396 }
1397
1398# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1399 /* Allow interpretation of patch manager code blocks since they can for
1400 instance throw #PFs for perfectly good reasons. */
1401 if (pVCpu->iem.s.fInPatchCode)
1402 {
1403 size_t cbRead = 0;
1404 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1405 AssertRCReturn(rc, rc);
1406 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1407 return VINF_SUCCESS;
1408 }
1409# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1410
1411 RTGCPHYS GCPhys;
1412 uint64_t fFlags;
1413 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1414 if (RT_SUCCESS(rc)) { /* probable */ }
1415 else
1416 {
1417 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1418 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1419 }
1420 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1421 else
1422 {
1423 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1424 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1425 }
1426 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1427 else
1428 {
1429 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1430 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1431 }
1432 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1433 /** @todo Check reserved bits and such stuff. PGM is better at doing
1434 * that, so do it when implementing the guest virtual address
1435 * TLB... */
1436
1437# ifdef IEM_VERIFICATION_MODE_FULL
1438 /*
1439 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1440 * instruction.
1441 */
1442 /** @todo optimize this differently by not using PGMPhysRead. */
1443 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1444 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1445 if ( offPrevOpcodes < cbOldOpcodes
1446 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1447 {
1448 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1449 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1450 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1451 pVCpu->iem.s.cbOpcode = cbNew;
1452 return VINF_SUCCESS;
1453 }
1454# endif
1455
1456 /*
1457 * Read the bytes at this address.
1458 */
1459 PVM pVM = pVCpu->CTX_SUFF(pVM);
1460# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1461 size_t cbActual;
1462 if ( PATMIsEnabled(pVM)
1463 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1464 {
1465 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1466 Assert(cbActual > 0);
1467 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1468 }
1469 else
1470# endif
1471 {
1472 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1473 if (cbToTryRead > cbLeftOnPage)
1474 cbToTryRead = cbLeftOnPage;
1475 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1476 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1477
1478 if (!pVCpu->iem.s.fBypassHandlers)
1479 {
1480 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1481 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1482 { /* likely */ }
1483 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1484 {
1485 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1486                  GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1487 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1488 }
1489 else
1490 {
1491 Log((RT_SUCCESS(rcStrict)
1492 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1493 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1494                  GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1495 return rcStrict;
1496 }
1497 }
1498 else
1499 {
1500 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1501 if (RT_SUCCESS(rc))
1502 { /* likely */ }
1503 else
1504 {
1505 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1506                  GCPtrPC, GCPhys, cbToTryRead, rc));
1507 return rc;
1508 }
1509 }
1510 pVCpu->iem.s.cbOpcode = cbToTryRead;
1511 }
1512#endif /* !IEM_WITH_CODE_TLB */
1513 return VINF_SUCCESS;
1514}
1515
1516
1517/**
1518 * Invalidates the IEM TLBs.
1519 *
1520 * This is called internally as well as by PGM when moving GC mappings.
1521 *
1522 *
1523 * @param pVCpu The cross context virtual CPU structure of the calling
1524 * thread.
1525 * @param fVmm Set when PGM calls us with a remapping.
1526 */
1527VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1528{
1529#ifdef IEM_WITH_CODE_TLB
1530 pVCpu->iem.s.cbInstrBufTotal = 0;
1531 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1532 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1533 { /* very likely */ }
1534 else
1535 {
1536 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1537 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1538 while (i-- > 0)
1539 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1540 }
1541#endif
1542
1543#ifdef IEM_WITH_DATA_TLB
1544 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1545 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1546 { /* very likely */ }
1547 else
1548 {
1549 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1550 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1551 while (i-- > 0)
1552 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1553 }
1554#endif
1555 NOREF(pVCpu); NOREF(fVmm);
1556}
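/*
 * Illustrative sketch (hypothetical helper, not from the IEM code): the
 * revision trick used above keeps the per-TLB revision in the tag itself,
 * so bumping uTlbRevision invalidates all 256 entries with a single add.
 * Only the tag layout and X86_PAGE_SHIFT come from the real code.
 */
#if 0
DECLINLINE(bool) iemTlbSketchIsHit(uint64_t uStoredTag, RTGCPTR GCPtrPage, uint64_t uTlbRevision)
{
    /* A lookup recomputes the tag with the *current* revision... */
    uint64_t const uTag = ((uint64_t)GCPtrPage >> X86_PAGE_SHIFT) | uTlbRevision;
    /* ...so an entry stored under an older revision can never compare equal. */
    return uStoredTag == uTag;
}
#endif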
1557
1558
1559/**
1560 * Invalidates a page in the TLBs.
1561 *
1562 * @param pVCpu The cross context virtual CPU structure of the calling
1563 * thread.
1564 * @param GCPtr The address of the page to invalidate
1565 */
1566VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1567{
1568#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1569 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1570 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1571 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1572 uintptr_t idx = (uint8_t)GCPtr;
1573
1574# ifdef IEM_WITH_CODE_TLB
1575 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1576 {
1577 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1578 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1579 pVCpu->iem.s.cbInstrBufTotal = 0;
1580 }
1581# endif
1582
1583# ifdef IEM_WITH_DATA_TLB
1584 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1585 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1586# endif
1587#else
1588 NOREF(pVCpu); NOREF(GCPtr);
1589#endif
1590}
1591
1592
1593/**
1594 * Invalidates the host physical aspects of the IEM TLBs.
1595 *
1596 * This is called internally as well as by PGM when moving GC mappings.
1597 *
1598 * @param pVCpu The cross context virtual CPU structure of the calling
1599 * thread.
1600 */
1601VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1602{
1603#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1604     /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1605
1606# ifdef IEM_WITH_CODE_TLB
1607 pVCpu->iem.s.cbInstrBufTotal = 0;
1608# endif
1609 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1610 if (uTlbPhysRev != 0)
1611 {
1612 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1613 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1614 }
1615 else
1616 {
1617 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1618 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1619
1620 unsigned i;
1621# ifdef IEM_WITH_CODE_TLB
1622 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1623 while (i-- > 0)
1624 {
1625 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1626 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1627 }
1628# endif
1629# ifdef IEM_WITH_DATA_TLB
1630 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1631 while (i-- > 0)
1632 {
1633 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1634 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1635 }
1636# endif
1637 }
1638#else
1639 NOREF(pVCpu);
1640#endif
1641}
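/*
 * Illustrative sketch (hypothetical helper, not from the IEM code): because
 * uTlbPhysRev only occupies the IEMTLBE_F_PHYS_REV bits of fFlagsAndPhysRev,
 * a single masked compare (as the opcode fetcher below does) checks both
 * that the physical info is current and that none of the listed negative
 * flags are set.
 */
#if 0
DECLINLINE(bool) iemTlbSketchPhysInfoUsable(uint64_t fFlagsAndPhysRev, uint64_t uTlbPhysRev)
{
    /* Stale revision bits or any set negative flag breaks the equality. */
    return (fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
        == uTlbPhysRev;
}
#endif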
1642
1643
1644/**
1645 * Invalidates the host physical aspects of the IEM TLBs.
1646 *
1647 * This is called internally as well as by PGM when moving GC mappings.
1648 *
1649 * @param pVM The cross context VM structure.
1650 *
1651 * @remarks Caller holds the PGM lock.
1652 */
1653VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1654{
1655 RT_NOREF_PV(pVM);
1656}
1657
1658#ifdef IEM_WITH_CODE_TLB
1659
1660/**
1661 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception
1662 * and longjmping on failure.
1663 *
1664 * We end up here for a number of reasons:
1665 * - pbInstrBuf isn't yet initialized.
1666 *      - Advancing beyond the buffer boundary (e.g. cross page).
1667 * - Advancing beyond the CS segment limit.
1668 * - Fetching from non-mappable page (e.g. MMIO).
1669 *
1670 * @param pVCpu The cross context virtual CPU structure of the
1671 * calling thread.
1672 * @param pvDst Where to return the bytes.
1673 * @param cbDst Number of bytes to read.
1674 *
1675 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1676 */
1677IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1678{
1679#ifdef IN_RING3
1680//__debugbreak();
1681 for (;;)
1682 {
1683 Assert(cbDst <= 8);
1684 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1685
1686 /*
1687 * We might have a partial buffer match, deal with that first to make the
1688 * rest simpler. This is the first part of the cross page/buffer case.
1689 */
1690 if (pVCpu->iem.s.pbInstrBuf != NULL)
1691 {
1692 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1693 {
1694 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1695 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1696 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1697
1698 cbDst -= cbCopy;
1699 pvDst = (uint8_t *)pvDst + cbCopy;
1700 offBuf += cbCopy;
1701                 pVCpu->iem.s.offInstrNextByte = offBuf;
1702 }
1703 }
1704
1705 /*
1706 * Check segment limit, figuring how much we're allowed to access at this point.
1707 *
1708 * We will fault immediately if RIP is past the segment limit / in non-canonical
1709 * territory. If we do continue, there are one or more bytes to read before we
1710 * end up in trouble and we need to do that first before faulting.
1711 */
1712 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1713 RTGCPTR GCPtrFirst;
1714 uint32_t cbMaxRead;
1715 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1716 {
1717 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1718 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1719 { /* likely */ }
1720 else
1721 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1722 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1723 }
1724 else
1725 {
1726 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1727 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1728 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1729 { /* likely */ }
1730 else
1731 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1732 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1733 if (cbMaxRead != 0)
1734 { /* likely */ }
1735 else
1736 {
1737 /* Overflowed because address is 0 and limit is max. */
1738 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1739 cbMaxRead = X86_PAGE_SIZE;
1740 }
1741 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1742 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1743 if (cbMaxRead2 < cbMaxRead)
1744 cbMaxRead = cbMaxRead2;
1745 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1746 }
1747
1748 /*
1749 * Get the TLB entry for this piece of code.
1750 */
1751 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1752 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1753 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1754 if (pTlbe->uTag == uTag)
1755 {
1756 /* likely when executing lots of code, otherwise unlikely */
1757# ifdef VBOX_WITH_STATISTICS
1758 pVCpu->iem.s.CodeTlb.cTlbHits++;
1759# endif
1760 }
1761 else
1762 {
1763 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1764# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1765 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1766 {
1767 pTlbe->uTag = uTag;
1768 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1769                                       | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1770 pTlbe->GCPhys = NIL_RTGCPHYS;
1771 pTlbe->pbMappingR3 = NULL;
1772 }
1773 else
1774# endif
1775 {
1776 RTGCPHYS GCPhys;
1777 uint64_t fFlags;
1778 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1779 if (RT_FAILURE(rc))
1780 {
1781                     Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1782 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1783 }
1784
1785 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1786 pTlbe->uTag = uTag;
1787 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1788 pTlbe->GCPhys = GCPhys;
1789 pTlbe->pbMappingR3 = NULL;
1790 }
1791 }
1792
1793 /*
1794 * Check TLB page table level access flags.
1795 */
1796 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1797 {
1798 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1799 {
1800 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1801 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1802 }
1803 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1804 {
1805                 Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1806 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1807 }
1808 }
1809
1810# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1811 /*
1812 * Allow interpretation of patch manager code blocks since they can for
1813 * instance throw #PFs for perfectly good reasons.
1814 */
1815 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1816         { /* not unlikely */ }
1817 else
1818 {
1819             /** @todo This could be optimized a little in ring-3 if we liked. */
1820 size_t cbRead = 0;
1821 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1822 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1823 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1824 return;
1825 }
1826# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1827
1828 /*
1829 * Look up the physical page info if necessary.
1830 */
1831 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1832 { /* not necessary */ }
1833 else
1834 {
1835 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1836 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1837 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1838 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1839 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1840 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1841 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1842 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1843 }
1844
1845# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1846 /*
1847      * Try to do a direct read using the pbMappingR3 pointer.
1848 */
1849 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1850 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1851 {
1852 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1853 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1854 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1855 {
1856 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1857 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1858 }
1859 else
1860 {
1861 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1862 Assert(cbInstr < cbMaxRead);
1863 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1864 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1865 }
1866 if (cbDst <= cbMaxRead)
1867 {
1868 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1869 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1870 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1871 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1872 return;
1873 }
1874 pVCpu->iem.s.pbInstrBuf = NULL;
1875
1876 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1877 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1878 }
1879 else
1880# endif
1881#if 0
1882 /*
1883      * If there is no special read handling, we can read a bit more and
1884 * put it in the prefetch buffer.
1885 */
1886 if ( cbDst < cbMaxRead
1887 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1888 {
1889 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1890 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1891 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1892 { /* likely */ }
1893 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1894 {
1895 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1896 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1897 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1898                 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1899 }
1900 else
1901 {
1902 Log((RT_SUCCESS(rcStrict)
1903 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1904 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1905 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1906 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1907 }
1908 }
1909 /*
1910 * Special read handling, so only read exactly what's needed.
1911 * This is a highly unlikely scenario.
1912 */
1913 else
1914#endif
1915 {
1916 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1917 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1918 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1919 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1920 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1921 { /* likely */ }
1922 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1923 {
1924                 Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1925                      GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1926 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1927 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1928 }
1929 else
1930 {
1931 Log((RT_SUCCESS(rcStrict)
1932                      ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1933                      : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1934                      GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1935 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1936 }
1937 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1938 if (cbToRead == cbDst)
1939 return;
1940 }
1941
1942 /*
1943 * More to read, loop.
1944 */
1945 cbDst -= cbMaxRead;
1946 pvDst = (uint8_t *)pvDst + cbMaxRead;
1947 }
1948#else
1949 RT_NOREF(pvDst, cbDst);
1950 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1951#endif
1952}
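/*
 * Worked example (illustrative numbers, not from the original source): a
 * fetch that straddles the instruction buffer.  With cbInstrBuf = 0x1000,
 * offInstrNextByte = 0xffe and cbDst = 4, the partial-match branch above
 * copies the last two buffered bytes, then the loop refills the buffer from
 * the next page and copies the remaining two.
 */
#if 0
static void iemSketchSplitFetch(void)
{
    uint32_t const cbInstrBuf = 0x1000;
    uint32_t const offBuf     = 0xffe;
    size_t         cbDst      = 4;
    uint32_t const cbCopy     = cbInstrBuf - offBuf;    /* 2 bytes from the current buffer. */
    cbDst -= cbCopy;                                    /* 2 bytes left for the next loop iteration. */
}
#endif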
1953
1954#else
1955
1956/**
1957 * Tries to fetch at least @a cbMin more opcode bytes, raising the
1958 * appropriate exception if it fails.
1959 *
1960 * @returns Strict VBox status code.
1961 * @param pVCpu The cross context virtual CPU structure of the
1962 * calling thread.
1963 * @param   cbMin               The minimum number of bytes relative to offOpcode
1964 * that must be read.
1965 */
1966IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1967{
1968 /*
1969 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1970 *
1971 * First translate CS:rIP to a physical address.
1972 */
1973 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1974 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1975 uint32_t cbToTryRead;
1976 RTGCPTR GCPtrNext;
1977 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1978 {
1979 cbToTryRead = PAGE_SIZE;
1980 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1981 if (!IEM_IS_CANONICAL(GCPtrNext))
1982 return iemRaiseGeneralProtectionFault0(pVCpu);
1983 }
1984 else
1985 {
1986 uint32_t GCPtrNext32 = pCtx->eip;
1987 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1988 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1989 if (GCPtrNext32 > pCtx->cs.u32Limit)
1990 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1991 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1992 if (!cbToTryRead) /* overflowed */
1993 {
1994 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1995 cbToTryRead = UINT32_MAX;
1996 /** @todo check out wrapping around the code segment. */
1997 }
1998 if (cbToTryRead < cbMin - cbLeft)
1999 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2000 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
2001 }
2002
2003 /* Only read up to the end of the page, and make sure we don't read more
2004 than the opcode buffer can hold. */
2005 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2006 if (cbToTryRead > cbLeftOnPage)
2007 cbToTryRead = cbLeftOnPage;
2008 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2009 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2010/** @todo r=bird: Convert assertion into undefined opcode exception? */
2011 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2012
2013# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2014 /* Allow interpretation of patch manager code blocks since they can for
2015 instance throw #PFs for perfectly good reasons. */
2016 if (pVCpu->iem.s.fInPatchCode)
2017 {
2018 size_t cbRead = 0;
2019 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2020 AssertRCReturn(rc, rc);
2021 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2022 return VINF_SUCCESS;
2023 }
2024# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2025
2026 RTGCPHYS GCPhys;
2027 uint64_t fFlags;
2028 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2029 if (RT_FAILURE(rc))
2030 {
2031 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2032 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2033 }
2034 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2035 {
2036 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2037 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2038 }
2039 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
2040 {
2041 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2042 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2043 }
2044 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2045 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2046 /** @todo Check reserved bits and such stuff. PGM is better at doing
2047 * that, so do it when implementing the guest virtual address
2048 * TLB... */
2049
2050 /*
2051 * Read the bytes at this address.
2052 *
2053 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2054 * and since PATM should only patch the start of an instruction there
2055 * should be no need to check again here.
2056 */
2057 if (!pVCpu->iem.s.fBypassHandlers)
2058 {
2059 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2060 cbToTryRead, PGMACCESSORIGIN_IEM);
2061 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2062 { /* likely */ }
2063 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2064 {
2065 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2066                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2067 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2068 }
2069 else
2070 {
2071 Log((RT_SUCCESS(rcStrict)
2072 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2073 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2074                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2075 return rcStrict;
2076 }
2077 }
2078 else
2079 {
2080 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2081 if (RT_SUCCESS(rc))
2082 { /* likely */ }
2083 else
2084 {
2085 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2086 return rc;
2087 }
2088 }
2089 pVCpu->iem.s.cbOpcode += cbToTryRead;
2090 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2091
2092 return VINF_SUCCESS;
2093}
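/*
 * Worked example (hypothetical helper, illustrative numbers): the page
 * clamping above.  With PAGE_SIZE = 0x1000 and GCPtrNext = 0x00401ffa the
 * page offset is 0xffa, so cbLeftOnPage = 6 and at most six more opcode
 * bytes are fetched before the next call has to translate the following page.
 */
#if 0
DECLINLINE(uint32_t) iemSketchClampToPage(RTGCPTR GCPtrNext, uint32_t cbToTryRead)
{
    uint32_t const cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
    return RT_MIN(cbToTryRead, cbLeftOnPage);           /* e.g. RT_MIN(0x1000, 6) = 6 */
}
#endif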
2094
2095#endif /* !IEM_WITH_CODE_TLB */
2096#ifndef IEM_WITH_SETJMP
2097
2098/**
2099 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2100 *
2101 * @returns Strict VBox status code.
2102 * @param pVCpu The cross context virtual CPU structure of the
2103 * calling thread.
2104 * @param pb Where to return the opcode byte.
2105 */
2106DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2107{
2108 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2109 if (rcStrict == VINF_SUCCESS)
2110 {
2111 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2112 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2113 pVCpu->iem.s.offOpcode = offOpcode + 1;
2114 }
2115 else
2116 *pb = 0;
2117 return rcStrict;
2118}
2119
2120
2121/**
2122 * Fetches the next opcode byte.
2123 *
2124 * @returns Strict VBox status code.
2125 * @param pVCpu The cross context virtual CPU structure of the
2126 * calling thread.
2127 * @param pu8 Where to return the opcode byte.
2128 */
2129DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2130{
2131 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2132 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2133 {
2134 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2135 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2136 return VINF_SUCCESS;
2137 }
2138 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2139}
2140
2141#else /* IEM_WITH_SETJMP */
2142
2143/**
2144 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2145 *
2146 * @returns The opcode byte.
2147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2148 */
2149DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2150{
2151# ifdef IEM_WITH_CODE_TLB
2152 uint8_t u8;
2153 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2154 return u8;
2155# else
2156 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2157 if (rcStrict == VINF_SUCCESS)
2158 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2159 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2160# endif
2161}
2162
2163
2164/**
2165 * Fetches the next opcode byte, longjmp on error.
2166 *
2167 * @returns The opcode byte.
2168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2169 */
2170DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2171{
2172# ifdef IEM_WITH_CODE_TLB
2173 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2174 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2175 if (RT_LIKELY( pbBuf != NULL
2176 && offBuf < pVCpu->iem.s.cbInstrBuf))
2177 {
2178 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2179 return pbBuf[offBuf];
2180 }
2181# else
2182 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2183 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2184 {
2185 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2186 return pVCpu->iem.s.abOpcode[offOpcode];
2187 }
2188# endif
2189 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2190}
2191
2192#endif /* IEM_WITH_SETJMP */
2193
2194/**
2195 * Fetches the next opcode byte, returns automatically on failure.
2196 *
2197 * @param a_pu8 Where to return the opcode byte.
2198 * @remark Implicitly references pVCpu.
2199 */
2200#ifndef IEM_WITH_SETJMP
2201# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2202 do \
2203 { \
2204 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2205 if (rcStrict2 == VINF_SUCCESS) \
2206 { /* likely */ } \
2207 else \
2208 return rcStrict2; \
2209 } while (0)
2210#else
2211# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2212#endif /* IEM_WITH_SETJMP */
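/*
 * Sketch of a typical caller (hypothetical helper, not from the original
 * source): the macro hides whether opcode fetching reports failure via a
 * status code or via longjmp, so decoder helpers can be written once for
 * both build configurations.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemSketchPeekModRm(PVMCPU pVCpu, uint8_t *pbRm)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);   /* returns or longjmps if the fetch fails */
    *pbRm = bRm;
    return VINF_SUCCESS;
}
#endif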
2213
2214
2215#ifndef IEM_WITH_SETJMP
2216/**
2217 * Fetches the next signed byte from the opcode stream.
2218 *
2219 * @returns Strict VBox status code.
2220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2221 * @param pi8 Where to return the signed byte.
2222 */
2223DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2224{
2225 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2226}
2227#endif /* !IEM_WITH_SETJMP */
2228
2229
2230/**
2231 * Fetches the next signed byte from the opcode stream, returning automatically
2232 * on failure.
2233 *
2234 * @param a_pi8 Where to return the signed byte.
2235 * @remark Implicitly references pVCpu.
2236 */
2237#ifndef IEM_WITH_SETJMP
2238# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2239 do \
2240 { \
2241 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2242 if (rcStrict2 != VINF_SUCCESS) \
2243 return rcStrict2; \
2244 } while (0)
2245#else /* IEM_WITH_SETJMP */
2246# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2247
2248#endif /* IEM_WITH_SETJMP */
2249
2250#ifndef IEM_WITH_SETJMP
2251
2252/**
2253 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2254 *
2255 * @returns Strict VBox status code.
2256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2257 * @param   pu16                Where to return the opcode word.
2258 */
2259DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2260{
2261 uint8_t u8;
2262 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2263 if (rcStrict == VINF_SUCCESS)
2264 *pu16 = (int8_t)u8;
2265 return rcStrict;
2266}
2267
2268
2269/**
2270 * Fetches the next signed byte from the opcode stream, extending it to
2271 * unsigned 16-bit.
2272 *
2273 * @returns Strict VBox status code.
2274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2275 * @param pu16 Where to return the unsigned word.
2276 */
2277DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2278{
2279 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2280 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2281 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2282
2283 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2284 pVCpu->iem.s.offOpcode = offOpcode + 1;
2285 return VINF_SUCCESS;
2286}
2287
2288#endif /* !IEM_WITH_SETJMP */
2289
2290/**
2291 * Fetches the next signed byte from the opcode stream, sign-extends it to a
2292 * word, and returns automatically on failure.
2293 *
2294 * @param a_pu16 Where to return the word.
2295 * @remark Implicitly references pVCpu.
2296 */
2297#ifndef IEM_WITH_SETJMP
2298# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2299 do \
2300 { \
2301 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2302 if (rcStrict2 != VINF_SUCCESS) \
2303 return rcStrict2; \
2304 } while (0)
2305#else
2306# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2307#endif
2308
2309#ifndef IEM_WITH_SETJMP
2310
2311/**
2312 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2313 *
2314 * @returns Strict VBox status code.
2315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2316 * @param pu32 Where to return the opcode dword.
2317 */
2318DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2319{
2320 uint8_t u8;
2321 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2322 if (rcStrict == VINF_SUCCESS)
2323 *pu32 = (int8_t)u8;
2324 return rcStrict;
2325}
2326
2327
2328/**
2329 * Fetches the next signed byte from the opcode stream, extending it to
2330 * unsigned 32-bit.
2331 *
2332 * @returns Strict VBox status code.
2333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2334 * @param pu32 Where to return the unsigned dword.
2335 */
2336DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2337{
2338 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2339 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2340 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2341
2342 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2343 pVCpu->iem.s.offOpcode = offOpcode + 1;
2344 return VINF_SUCCESS;
2345}
2346
2347#endif /* !IEM_WITH_SETJMP */
2348
2349/**
2350 * Fetches the next signed byte from the opcode stream, sign-extends it to a
2351 * double word, and returns automatically on failure.
2352 *
2353 * @param   a_pu32              Where to return the double word.
2354 * @remark Implicitly references pVCpu.
2355 */
2356#ifndef IEM_WITH_SETJMP
2357 # define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2358 do \
2359 { \
2360 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2361 if (rcStrict2 != VINF_SUCCESS) \
2362 return rcStrict2; \
2363 } while (0)
2364#else
2365# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2366#endif
2367
2368#ifndef IEM_WITH_SETJMP
2369
2370/**
2371 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2372 *
2373 * @returns Strict VBox status code.
2374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2375 * @param pu64 Where to return the opcode qword.
2376 */
2377DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2378{
2379 uint8_t u8;
2380 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2381 if (rcStrict == VINF_SUCCESS)
2382 *pu64 = (int8_t)u8;
2383 return rcStrict;
2384}
2385
2386
2387/**
2388 * Fetches the next signed byte from the opcode stream, extending it to
2389 * unsigned 64-bit.
2390 *
2391 * @returns Strict VBox status code.
2392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2393 * @param pu64 Where to return the unsigned qword.
2394 */
2395DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2396{
2397 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2398 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2399 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2400
2401 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2402 pVCpu->iem.s.offOpcode = offOpcode + 1;
2403 return VINF_SUCCESS;
2404}
2405
2406#endif /* !IEM_WITH_SETJMP */
2407
2408
2409/**
2410 * Fetches the next signed byte from the opcode stream, sign-extends it to a
2411 * quad word, and returns automatically on failure.
2412 *
2413 * @param   a_pu64              Where to return the quad word.
2414 * @remark Implicitly references pVCpu.
2415 */
2416#ifndef IEM_WITH_SETJMP
2417# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2418 do \
2419 { \
2420 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2421 if (rcStrict2 != VINF_SUCCESS) \
2422 return rcStrict2; \
2423 } while (0)
2424#else
2425# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2426#endif
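/*
 * Sign-extension sketch (illustrative only, not part of the original
 * source): a disp8 of 0x80 (-128) widened through the S8_SX fetchers above
 * becomes 0xff80 as a word, 0xffffff80 as a dword and 0xffffffffffffff80 as
 * a qword, which is exactly what the (int8_t) casts achieve.
 */
#if 0
AssertCompile((uint16_t)(int8_t)UINT8_C(0x80) == UINT16_C(0xff80));
AssertCompile((uint32_t)(int8_t)UINT8_C(0x80) == UINT32_C(0xffffff80));
AssertCompile((uint64_t)(int8_t)UINT8_C(0x80) == UINT64_C(0xffffffffffffff80));
#endif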
2427
2428
2429#ifndef IEM_WITH_SETJMP
2430
2431/**
2432 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2433 *
2434 * @returns Strict VBox status code.
2435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2436 * @param pu16 Where to return the opcode word.
2437 */
2438DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2439{
2440 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2441 if (rcStrict == VINF_SUCCESS)
2442 {
2443 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2444# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2445 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2446# else
2447 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2448# endif
2449 pVCpu->iem.s.offOpcode = offOpcode + 2;
2450 }
2451 else
2452 *pu16 = 0;
2453 return rcStrict;
2454}
2455
2456
2457/**
2458 * Fetches the next opcode word.
2459 *
2460 * @returns Strict VBox status code.
2461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2462 * @param pu16 Where to return the opcode word.
2463 */
2464DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2465{
2466 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2467 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2468 {
2469 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2470# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2471 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2472# else
2473 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2474# endif
2475 return VINF_SUCCESS;
2476 }
2477 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2478}
2479
2480#else /* IEM_WITH_SETJMP */
2481
2482/**
2483 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2484 *
2485 * @returns The opcode word.
2486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2487 */
2488DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2489{
2490# ifdef IEM_WITH_CODE_TLB
2491 uint16_t u16;
2492 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2493 return u16;
2494# else
2495 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2496 if (rcStrict == VINF_SUCCESS)
2497 {
2498 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2499 pVCpu->iem.s.offOpcode += 2;
2500# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2501 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2502# else
2503 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2504# endif
2505 }
2506 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2507# endif
2508}
2509
2510
2511/**
2512 * Fetches the next opcode word, longjmp on error.
2513 *
2514 * @returns The opcode word.
2515 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2516 */
2517DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2518{
2519# ifdef IEM_WITH_CODE_TLB
2520 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2521 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2522 if (RT_LIKELY( pbBuf != NULL
2523 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2524 {
2525 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2526# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2527 return *(uint16_t const *)&pbBuf[offBuf];
2528# else
2529 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2530# endif
2531 }
2532# else
2533 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2534 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2535 {
2536 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2537# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2538 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2539# else
2540 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2541# endif
2542 }
2543# endif
2544 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2545}
2546
2547#endif /* IEM_WITH_SETJMP */
2548
2549
2550/**
2551 * Fetches the next opcode word, returns automatically on failure.
2552 *
2553 * @param a_pu16 Where to return the opcode word.
2554 * @remark Implicitly references pVCpu.
2555 */
2556#ifndef IEM_WITH_SETJMP
2557# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2558 do \
2559 { \
2560 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2561 if (rcStrict2 != VINF_SUCCESS) \
2562 return rcStrict2; \
2563 } while (0)
2564#else
2565# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2566#endif
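/*
 * Endianness note (illustrative only): RT_MAKE_U16 and RT_MAKE_U32_FROM_U8
 * in the fetchers above compose the bytes least-significant first, i.e. the
 * same little-endian value an unaligned load of those opcode bytes yields on
 * x86, which is why the IEM_USE_UNALIGNED_DATA_ACCESS paths are
 * interchangeable.
 */
#if 0
AssertCompile(RT_MAKE_U16(0x34, 0x12) == UINT16_C(0x1234));
AssertCompile(RT_MAKE_U32_FROM_U8(0x78, 0x56, 0x34, 0x12) == UINT32_C(0x12345678));
#endif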
2567
2568#ifndef IEM_WITH_SETJMP
2569
2570/**
2571 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2572 *
2573 * @returns Strict VBox status code.
2574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2575 * @param pu32 Where to return the opcode double word.
2576 */
2577DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2578{
2579 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2580 if (rcStrict == VINF_SUCCESS)
2581 {
2582 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2583 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2584 pVCpu->iem.s.offOpcode = offOpcode + 2;
2585 }
2586 else
2587 *pu32 = 0;
2588 return rcStrict;
2589}
2590
2591
2592/**
2593 * Fetches the next opcode word, zero extending it to a double word.
2594 *
2595 * @returns Strict VBox status code.
2596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2597 * @param pu32 Where to return the opcode double word.
2598 */
2599DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2600{
2601 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2602 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2603 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2604
2605 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2606 pVCpu->iem.s.offOpcode = offOpcode + 2;
2607 return VINF_SUCCESS;
2608}
2609
2610#endif /* !IEM_WITH_SETJMP */
2611
2612
2613/**
2614 * Fetches the next opcode word and zero extends it to a double word, returns
2615 * automatically on failure.
2616 *
2617 * @param a_pu32 Where to return the opcode double word.
2618 * @remark Implicitly references pVCpu.
2619 */
2620#ifndef IEM_WITH_SETJMP
2621# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2622 do \
2623 { \
2624 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2625 if (rcStrict2 != VINF_SUCCESS) \
2626 return rcStrict2; \
2627 } while (0)
2628#else
2629# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2630#endif
2631
2632#ifndef IEM_WITH_SETJMP
2633
2634/**
2635 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2636 *
2637 * @returns Strict VBox status code.
2638 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2639 * @param pu64 Where to return the opcode quad word.
2640 */
2641DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2642{
2643 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2644 if (rcStrict == VINF_SUCCESS)
2645 {
2646 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2647 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2648 pVCpu->iem.s.offOpcode = offOpcode + 2;
2649 }
2650 else
2651 *pu64 = 0;
2652 return rcStrict;
2653}
2654
2655
2656/**
2657 * Fetches the next opcode word, zero extending it to a quad word.
2658 *
2659 * @returns Strict VBox status code.
2660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2661 * @param pu64 Where to return the opcode quad word.
2662 */
2663DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2664{
2665 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2666 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2667 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2668
2669 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2670 pVCpu->iem.s.offOpcode = offOpcode + 2;
2671 return VINF_SUCCESS;
2672}
2673
2674#endif /* !IEM_WITH_SETJMP */
2675
2676/**
2677 * Fetches the next opcode word and zero extends it to a quad word, returns
2678 * automatically on failure.
2679 *
2680 * @param a_pu64 Where to return the opcode quad word.
2681 * @remark Implicitly references pVCpu.
2682 */
2683#ifndef IEM_WITH_SETJMP
2684# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2685 do \
2686 { \
2687 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2688 if (rcStrict2 != VINF_SUCCESS) \
2689 return rcStrict2; \
2690 } while (0)
2691#else
2692# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2693#endif
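/*
 * Zero- vs sign-extension (illustrative only): the ZX fetchers above simply
 * widen the value, so an opcode word of 0x8000 stays 0x0000000000008000,
 * unlike the earlier S8_SX fetchers which replicate the sign bit.
 */
#if 0
AssertCompile((uint64_t)UINT16_C(0x8000)          == UINT64_C(0x0000000000008000));
AssertCompile((uint64_t)(int16_t)UINT16_C(0x8000) == UINT64_C(0xffffffffffff8000));
#endif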
2694
2695
2696#ifndef IEM_WITH_SETJMP
2697/**
2698 * Fetches the next signed word from the opcode stream.
2699 *
2700 * @returns Strict VBox status code.
2701 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2702 * @param pi16 Where to return the signed word.
2703 */
2704DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2705{
2706 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2707}
2708#endif /* !IEM_WITH_SETJMP */
2709
2710
2711/**
2712 * Fetches the next signed word from the opcode stream, returning automatically
2713 * on failure.
2714 *
2715 * @param a_pi16 Where to return the signed word.
2716 * @remark Implicitly references pVCpu.
2717 */
2718#ifndef IEM_WITH_SETJMP
2719# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2720 do \
2721 { \
2722 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2723 if (rcStrict2 != VINF_SUCCESS) \
2724 return rcStrict2; \
2725 } while (0)
2726#else
2727# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2728#endif
2729
2730#ifndef IEM_WITH_SETJMP
2731
2732/**
2733 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2734 *
2735 * @returns Strict VBox status code.
2736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2737 * @param pu32 Where to return the opcode dword.
2738 */
2739DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2740{
2741 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2742 if (rcStrict == VINF_SUCCESS)
2743 {
2744 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2745# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2746 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2747# else
2748 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2749 pVCpu->iem.s.abOpcode[offOpcode + 1],
2750 pVCpu->iem.s.abOpcode[offOpcode + 2],
2751 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2752# endif
2753 pVCpu->iem.s.offOpcode = offOpcode + 4;
2754 }
2755 else
2756 *pu32 = 0;
2757 return rcStrict;
2758}
2759
2760
2761/**
2762 * Fetches the next opcode dword.
2763 *
2764 * @returns Strict VBox status code.
2765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2766 * @param pu32 Where to return the opcode double word.
2767 */
2768DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2769{
2770 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2771 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2772 {
2773 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2774# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2775 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2776# else
2777 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2778 pVCpu->iem.s.abOpcode[offOpcode + 1],
2779 pVCpu->iem.s.abOpcode[offOpcode + 2],
2780 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2781# endif
2782 return VINF_SUCCESS;
2783 }
2784 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2785}
2786
2787 #else  /* IEM_WITH_SETJMP */
2788
2789/**
2790 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2791 *
2792 * @returns The opcode dword.
2793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2794 */
2795DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2796{
2797# ifdef IEM_WITH_CODE_TLB
2798 uint32_t u32;
2799 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2800 return u32;
2801# else
2802 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2803 if (rcStrict == VINF_SUCCESS)
2804 {
2805 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2806 pVCpu->iem.s.offOpcode = offOpcode + 4;
2807# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2808 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2809# else
2810 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2811 pVCpu->iem.s.abOpcode[offOpcode + 1],
2812 pVCpu->iem.s.abOpcode[offOpcode + 2],
2813 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2814# endif
2815 }
2816 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2817# endif
2818}
2819
2820
2821/**
2822 * Fetches the next opcode dword, longjmp on error.
2823 *
2824 * @returns The opcode dword.
2825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2826 */
2827DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2828{
2829# ifdef IEM_WITH_CODE_TLB
2830 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2831 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2832 if (RT_LIKELY( pbBuf != NULL
2833 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2834 {
2835 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2836# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2837 return *(uint32_t const *)&pbBuf[offBuf];
2838# else
2839 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2840 pbBuf[offBuf + 1],
2841 pbBuf[offBuf + 2],
2842 pbBuf[offBuf + 3]);
2843# endif
2844 }
2845# else
2846 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2847 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2848 {
2849 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2850# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2851 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2852# else
2853 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2854 pVCpu->iem.s.abOpcode[offOpcode + 1],
2855 pVCpu->iem.s.abOpcode[offOpcode + 2],
2856 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2857# endif
2858 }
2859# endif
2860 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2861}
2862
2863 #endif /* IEM_WITH_SETJMP */
2864
2865
2866/**
2867 * Fetches the next opcode dword, returns automatically on failure.
2868 *
2869 * @param a_pu32 Where to return the opcode dword.
2870 * @remark Implicitly references pVCpu.
2871 */
2872#ifndef IEM_WITH_SETJMP
2873# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2874 do \
2875 { \
2876 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2877 if (rcStrict2 != VINF_SUCCESS) \
2878 return rcStrict2; \
2879 } while (0)
2880#else
2881# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2882#endif
2883
2884#ifndef IEM_WITH_SETJMP
2885
2886/**
2887 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2888 *
2889 * @returns Strict VBox status code.
2890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2891 * @param   pu64                Where to return the opcode qword.
2892 */
2893DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2894{
2895 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2896 if (rcStrict == VINF_SUCCESS)
2897 {
2898 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2899 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2900 pVCpu->iem.s.abOpcode[offOpcode + 1],
2901 pVCpu->iem.s.abOpcode[offOpcode + 2],
2902 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2903 pVCpu->iem.s.offOpcode = offOpcode + 4;
2904 }
2905 else
2906 *pu64 = 0;
2907 return rcStrict;
2908}
2909
2910
2911/**
2912 * Fetches the next opcode dword, zero extending it to a quad word.
2913 *
2914 * @returns Strict VBox status code.
2915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2916 * @param pu64 Where to return the opcode quad word.
2917 */
2918DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2919{
2920 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2921 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2922 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2923
2924 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2925 pVCpu->iem.s.abOpcode[offOpcode + 1],
2926 pVCpu->iem.s.abOpcode[offOpcode + 2],
2927 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2928 pVCpu->iem.s.offOpcode = offOpcode + 4;
2929 return VINF_SUCCESS;
2930}
2931
2932#endif /* !IEM_WITH_SETJMP */
2933
2934
2935/**
2936 * Fetches the next opcode dword and zero extends it to a quad word, returns
2937 * automatically on failure.
2938 *
2939 * @param a_pu64 Where to return the opcode quad word.
2940 * @remark Implicitly references pVCpu.
2941 */
2942#ifndef IEM_WITH_SETJMP
2943# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2944 do \
2945 { \
2946 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2947 if (rcStrict2 != VINF_SUCCESS) \
2948 return rcStrict2; \
2949 } while (0)
2950#else
2951# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2952#endif
2953
2954
2955#ifndef IEM_WITH_SETJMP
2956/**
2957 * Fetches the next signed double word from the opcode stream.
2958 *
2959 * @returns Strict VBox status code.
2960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2961 * @param pi32 Where to return the signed double word.
2962 */
2963DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2964{
2965 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2966}
2967#endif
2968
2969/**
2970 * Fetches the next signed double word from the opcode stream, returning
2971 * automatically on failure.
2972 *
2973 * @param a_pi32 Where to return the signed double word.
2974 * @remark Implicitly references pVCpu.
2975 */
2976#ifndef IEM_WITH_SETJMP
2977# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2978 do \
2979 { \
2980 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2981 if (rcStrict2 != VINF_SUCCESS) \
2982 return rcStrict2; \
2983 } while (0)
2984#else
2985# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2986#endif
2987
2988#ifndef IEM_WITH_SETJMP
2989
2990/**
2991 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2992 *
2993 * @returns Strict VBox status code.
2994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2995 * @param pu64 Where to return the opcode qword.
2996 */
2997DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2998{
2999 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3000 if (rcStrict == VINF_SUCCESS)
3001 {
3002 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3003 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3004 pVCpu->iem.s.abOpcode[offOpcode + 1],
3005 pVCpu->iem.s.abOpcode[offOpcode + 2],
3006 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3007 pVCpu->iem.s.offOpcode = offOpcode + 4;
3008 }
3009 else
3010 *pu64 = 0;
3011 return rcStrict;
3012}
3013
3014
3015/**
3016 * Fetches the next opcode dword, sign extending it into a quad word.
3017 *
3018 * @returns Strict VBox status code.
3019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3020 * @param pu64 Where to return the opcode quad word.
3021 */
3022DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3023{
3024 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3025 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3026 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3027
3028 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3029 pVCpu->iem.s.abOpcode[offOpcode + 1],
3030 pVCpu->iem.s.abOpcode[offOpcode + 2],
3031 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3032 *pu64 = i32;
3033 pVCpu->iem.s.offOpcode = offOpcode + 4;
3034 return VINF_SUCCESS;
3035}
3036
3037#endif /* !IEM_WITH_SETJMP */
3038
3039
3040/**
3041 * Fetches the next opcode double word and sign extends it to a quad word,
3042 * returns automatically on failure.
3043 *
3044 * @param a_pu64 Where to return the opcode quad word.
3045 * @remark Implicitly references pVCpu.
3046 */
3047#ifndef IEM_WITH_SETJMP
3048# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3049 do \
3050 { \
3051 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3052 if (rcStrict2 != VINF_SUCCESS) \
3053 return rcStrict2; \
3054 } while (0)
3055#else
3056# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3057#endif
3058
3059#ifndef IEM_WITH_SETJMP
3060
3061/**
3062 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3063 *
3064 * @returns Strict VBox status code.
3065 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3066 * @param pu64 Where to return the opcode qword.
3067 */
3068DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3069{
3070 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3071 if (rcStrict == VINF_SUCCESS)
3072 {
3073 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3074# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3075 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3076# else
3077 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3078 pVCpu->iem.s.abOpcode[offOpcode + 1],
3079 pVCpu->iem.s.abOpcode[offOpcode + 2],
3080 pVCpu->iem.s.abOpcode[offOpcode + 3],
3081 pVCpu->iem.s.abOpcode[offOpcode + 4],
3082 pVCpu->iem.s.abOpcode[offOpcode + 5],
3083 pVCpu->iem.s.abOpcode[offOpcode + 6],
3084 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3085# endif
3086 pVCpu->iem.s.offOpcode = offOpcode + 8;
3087 }
3088 else
3089 *pu64 = 0;
3090 return rcStrict;
3091}
3092
3093
3094/**
3095 * Fetches the next opcode qword.
3096 *
3097 * @returns Strict VBox status code.
3098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3099 * @param pu64 Where to return the opcode qword.
3100 */
3101DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3102{
3103 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3104 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3105 {
3106# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3107 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3108# else
3109 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3110 pVCpu->iem.s.abOpcode[offOpcode + 1],
3111 pVCpu->iem.s.abOpcode[offOpcode + 2],
3112 pVCpu->iem.s.abOpcode[offOpcode + 3],
3113 pVCpu->iem.s.abOpcode[offOpcode + 4],
3114 pVCpu->iem.s.abOpcode[offOpcode + 5],
3115 pVCpu->iem.s.abOpcode[offOpcode + 6],
3116 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3117# endif
3118 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3119 return VINF_SUCCESS;
3120 }
3121 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3122}
3123
3124#else /* IEM_WITH_SETJMP */
3125
3126/**
3127 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3128 *
3129 * @returns The opcode qword.
3130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3131 */
3132DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3133{
3134# ifdef IEM_WITH_CODE_TLB
3135 uint64_t u64;
3136 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3137 return u64;
3138# else
3139 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3140 if (rcStrict == VINF_SUCCESS)
3141 {
3142 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3143 pVCpu->iem.s.offOpcode = offOpcode + 8;
3144# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3145 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3146# else
3147 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3148 pVCpu->iem.s.abOpcode[offOpcode + 1],
3149 pVCpu->iem.s.abOpcode[offOpcode + 2],
3150 pVCpu->iem.s.abOpcode[offOpcode + 3],
3151 pVCpu->iem.s.abOpcode[offOpcode + 4],
3152 pVCpu->iem.s.abOpcode[offOpcode + 5],
3153 pVCpu->iem.s.abOpcode[offOpcode + 6],
3154 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3155# endif
3156 }
3157 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3158# endif
3159}
3160
3161
3162/**
3163 * Fetches the next opcode qword, longjmp on error.
3164 *
3165 * @returns The opcode qword.
3166 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3167 */
3168DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3169{
3170# ifdef IEM_WITH_CODE_TLB
3171 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3172 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3173 if (RT_LIKELY( pbBuf != NULL
3174 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3175 {
3176 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3177# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3178 return *(uint64_t const *)&pbBuf[offBuf];
3179# else
3180 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3181 pbBuf[offBuf + 1],
3182 pbBuf[offBuf + 2],
3183 pbBuf[offBuf + 3],
3184 pbBuf[offBuf + 4],
3185 pbBuf[offBuf + 5],
3186 pbBuf[offBuf + 6],
3187 pbBuf[offBuf + 7]);
3188# endif
3189 }
3190# else
3191 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3192 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3193 {
3194 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3195# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3196 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3197# else
3198 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3199 pVCpu->iem.s.abOpcode[offOpcode + 1],
3200 pVCpu->iem.s.abOpcode[offOpcode + 2],
3201 pVCpu->iem.s.abOpcode[offOpcode + 3],
3202 pVCpu->iem.s.abOpcode[offOpcode + 4],
3203 pVCpu->iem.s.abOpcode[offOpcode + 5],
3204 pVCpu->iem.s.abOpcode[offOpcode + 6],
3205 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3206# endif
3207 }
3208# endif
3209 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3210}
3211
3212#endif /* IEM_WITH_SETJMP */
3213
3214/**
3215 * Fetches the next opcode quad word, returns automatically on failure.
3216 *
3217 * @param a_pu64 Where to return the opcode quad word.
3218 * @remark Implicitly references pVCpu.
3219 */
3220#ifndef IEM_WITH_SETJMP
3221# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3222 do \
3223 { \
3224 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3225 if (rcStrict2 != VINF_SUCCESS) \
3226 return rcStrict2; \
3227 } while (0)
3228#else
3229# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3230#endif
3231
3232
3233/** @name Misc Worker Functions.
3234 * @{
3235 */
3236
3237/**
3238 * Gets the exception class for the specified exception vector.
3239 *
3240 * @returns The class of the specified exception.
3241 * @param uVector The exception vector.
3242 */
3243IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3244{
3245 Assert(uVector <= X86_XCPT_LAST);
3246 switch (uVector)
3247 {
3248 case X86_XCPT_DE:
3249 case X86_XCPT_TS:
3250 case X86_XCPT_NP:
3251 case X86_XCPT_SS:
3252 case X86_XCPT_GP:
3253 case X86_XCPT_SX: /* AMD only */
3254 return IEMXCPTCLASS_CONTRIBUTORY;
3255
3256 case X86_XCPT_PF:
3257 case X86_XCPT_VE: /* Intel only */
3258 return IEMXCPTCLASS_PAGE_FAULT;
3259
3260 case X86_XCPT_DF:
3261 return IEMXCPTCLASS_DOUBLE_FAULT;
3262 }
3263 return IEMXCPTCLASS_BENIGN;
3264}
3265
3266
3267/**
3268 * Evaluates how to handle an exception caused during delivery of another event
3269 * (exception / interrupt).
3270 *
3271 * @returns How to handle the recursive exception.
3272 * @param pVCpu The cross context virtual CPU structure of the
3273 * calling thread.
3274 * @param fPrevFlags The flags of the previous event.
3275 * @param uPrevVector The vector of the previous event.
3276 * @param fCurFlags The flags of the current exception.
3277 * @param uCurVector The vector of the current exception.
3278 * @param pfXcptRaiseInfo Where to store additional information about the
3279 * exception condition. Optional.
3280 */
3281VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3282 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3283{
3284 /*
3285 * Only CPU exceptions can be raised while delivering other events; software interrupt
3286 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3287 */
3288 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3289 Assert(pVCpu); RT_NOREF(pVCpu);
3290 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3291
3292 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3293 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3294 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3295 {
3296 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3297 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3298 {
3299 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3300 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3301 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3302 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3303 {
3304 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3305 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3306 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3307 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3308 uCurVector, IEM_GET_CTX(pVCpu)->cr2));
3309 }
3310 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3311 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3312 {
3313 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3314 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3315 }
3316 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3317 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3318 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3319 {
3320 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3321 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3322 }
3323 }
3324 else
3325 {
3326 if (uPrevVector == X86_XCPT_NMI)
3327 {
3328 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3329 if (uCurVector == X86_XCPT_PF)
3330 {
3331 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3332 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3333 }
3334 }
3335 else if ( uPrevVector == X86_XCPT_AC
3336 && uCurVector == X86_XCPT_AC)
3337 {
3338 enmRaise = IEMXCPTRAISE_CPU_HANG;
3339 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3340 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3341 }
3342 }
3343 }
3344 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3345 {
3346 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3347 if (uCurVector == X86_XCPT_PF)
3348 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3349 }
3350 else
3351 {
3352 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3353 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3354 }
3355
3356 if (pfXcptRaiseInfo)
3357 *pfXcptRaiseInfo = fRaiseInfo;
3358 return enmRaise;
3359}
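/*
 * Illustrative sketch (hypothetical caller, not part of the build): acting on
 * the verdict of IEMEvaluateRecursiveXcpt.  The enum values are the ones used
 * above; the surrounding control flow is made up.
 *
 *     IEMXCPTRAISEINFO fRaiseInfo;
 *     IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevFlags, uPrevVector,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, uCurVector,
 *                                                      &fRaiseInfo);
 *     if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
 *         // raise #DF instead of the current exception
 *     else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
 *         // shut down the VCPU (see iemInitiateCpuShutdown below)
 *     else if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
 *         // deliver the current exception as-is
 */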
3360
3361
3362/**
3363 * Enters the CPU shutdown state initiated by a triple fault or other
3364 * unrecoverable conditions.
3365 *
3366 * @returns Strict VBox status code.
3367 * @param pVCpu The cross context virtual CPU structure of the
3368 * calling thread.
3369 */
3370IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3371{
3372 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3373 {
3374 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3375 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3376 }
3377
3378 RT_NOREF(pVCpu);
3379 return VINF_EM_TRIPLE_FAULT;
3380}
3381
3382
3383/**
3384 * Validates a new SS segment.
3385 *
3386 * @returns VBox strict status code.
3387 * @param pVCpu The cross context virtual CPU structure of the
3388 * calling thread.
3389 * @param pCtx The CPU context.
3390 * @param NewSS The new SS selector.
3391 * @param uCpl The CPL to load the stack for.
3392 * @param pDesc Where to return the descriptor.
3393 */
3394IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3395{
3396 NOREF(pCtx);
3397
3398 /* Null selectors are not allowed (we're not called for dispatching
3399 interrupts with SS=0 in long mode). */
3400 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3401 {
3402 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3403 return iemRaiseTaskSwitchFault0(pVCpu);
3404 }
3405
3406 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3407 if ((NewSS & X86_SEL_RPL) != uCpl)
3408 {
3409 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3410 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3411 }
3412
3413 /*
3414 * Read the descriptor.
3415 */
3416 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3417 if (rcStrict != VINF_SUCCESS)
3418 return rcStrict;
3419
3420 /*
3421 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3422 */
3423 if (!pDesc->Legacy.Gen.u1DescType)
3424 {
3425 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3426 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3427 }
3428
3429 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3430 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3431 {
3432 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3433 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3434 }
3435 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3436 {
3437 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3438 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3439 }
3440
3441 /* Is it there? */
3442 /** @todo testcase: Is this checked before the canonical / limit check below? */
3443 if (!pDesc->Legacy.Gen.u1Present)
3444 {
3445 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3446 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3447 }
3448
3449 return VINF_SUCCESS;
3450}
3451
3452
3453/**
3454 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3455 * not.
3456 *
3457 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3458 * @param a_pCtx The CPU context.
3459 */
3460#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3461# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3462 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3463 ? (a_pCtx)->eflags.u \
3464 : CPUMRawGetEFlags(a_pVCpu) )
3465#else
3466# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3467 ( (a_pCtx)->eflags.u )
3468#endif
3469
3470/**
3471 * Updates the EFLAGS in the correct manner wrt. PATM.
3472 *
3473 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3474 * @param a_pCtx The CPU context.
3475 * @param a_fEfl The new EFLAGS.
3476 */
3477#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3478# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3479 do { \
3480 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3481 (a_pCtx)->eflags.u = (a_fEfl); \
3482 else \
3483 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3484 } while (0)
3485#else
3486# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3487 do { \
3488 (a_pCtx)->eflags.u = (a_fEfl); \
3489 } while (0)
3490#endif
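/*
 * Illustrative sketch (not part of the build): the intended read-modify-write
 * pattern for the two macros above, matching what the exception delivery code
 * further down does when clearing IF/TF/AC on interrupt entry.
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *     fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
 *     IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 */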
3491
3492
3493/** @} */
3494
3495/** @name Raising Exceptions.
3496 *
3497 * @{
3498 */
3499
3500
3501/**
3502 * Loads the specified stack far pointer from the TSS.
3503 *
3504 * @returns VBox strict status code.
3505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3506 * @param pCtx The CPU context.
3507 * @param uCpl The CPL to load the stack for.
3508 * @param pSelSS Where to return the new stack segment.
3509 * @param puEsp Where to return the new stack pointer.
3510 */
3511IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3512 PRTSEL pSelSS, uint32_t *puEsp)
3513{
3514 VBOXSTRICTRC rcStrict;
3515 Assert(uCpl < 4);
3516
3517 switch (pCtx->tr.Attr.n.u4Type)
3518 {
3519 /*
3520 * 16-bit TSS (X86TSS16).
3521 */
3522 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3523 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3524 {
3525 uint32_t off = uCpl * 4 + 2;
3526 if (off + 4 <= pCtx->tr.u32Limit)
3527 {
3528 /** @todo check actual access pattern here. */
3529 uint32_t u32Tmp = 0; /* gcc maybe... */
3530 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3531 if (rcStrict == VINF_SUCCESS)
3532 {
3533 *puEsp = RT_LOWORD(u32Tmp);
3534 *pSelSS = RT_HIWORD(u32Tmp);
3535 return VINF_SUCCESS;
3536 }
3537 }
3538 else
3539 {
3540 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3541 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3542 }
3543 break;
3544 }
3545
3546 /*
3547 * 32-bit TSS (X86TSS32).
3548 */
3549 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3550 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3551 {
3552 uint32_t off = uCpl * 8 + 4;
3553 if (off + 7 <= pCtx->tr.u32Limit)
3554 {
3555/** @todo check actual access pattern here. */
3556 uint64_t u64Tmp;
3557 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3558 if (rcStrict == VINF_SUCCESS)
3559 {
3560 *puEsp = u64Tmp & UINT32_MAX;
3561 *pSelSS = (RTSEL)(u64Tmp >> 32);
3562 return VINF_SUCCESS;
3563 }
3564 }
3565 else
3566 {
3567 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3568 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3569 }
3570 break;
3571 }
3572
3573 default:
3574 AssertFailed();
3575 rcStrict = VERR_IEM_IPE_4;
3576 break;
3577 }
3578
3579 *puEsp = 0; /* make gcc happy */
3580 *pSelSS = 0; /* make gcc happy */
3581 return rcStrict;
3582}
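/*
 * Worked example for the offset math above, assuming the standard TSS layouts:
 * in a 16-bit TSS the sp0/ss0 pair starts at offset 2 and each privilege level
 * takes 4 bytes, so uCpl=1 fetches 4 bytes at 1*4 + 2 = 6 (sp1 at 6, ss1 at 8).
 * In a 32-bit TSS esp0 starts at offset 4 and each level takes 8 bytes, so
 * uCpl=2 fetches 8 bytes at 2*8 + 4 = 20 (esp2 at 20, ss2 at 24).
 */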
3583
3584
3585/**
3586 * Loads the specified stack pointer from the 64-bit TSS.
3587 *
3588 * @returns VBox strict status code.
3589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3590 * @param pCtx The CPU context.
3591 * @param uCpl The CPL to load the stack for.
3592 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3593 * @param puRsp Where to return the new stack pointer.
3594 */
3595IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3596{
3597 Assert(uCpl < 4);
3598 Assert(uIst < 8);
3599 *puRsp = 0; /* make gcc happy */
3600
3601 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3602
3603 uint32_t off;
3604 if (uIst)
3605 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3606 else
3607 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3608 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3609 {
3610 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3611 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3612 }
3613
3614 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3615}
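/*
 * Worked example for the offset math above, assuming the standard 64-bit TSS
 * layout (rsp0 at offset 4, ist1 at offset 36): uIst=0 with uCpl=1 fetches
 * 8 bytes at 1*8 + 4 = 12 (rsp1), while uIst=3 fetches 8 bytes at
 * (3-1)*8 + 36 = 52 (ist3) regardless of uCpl.
 */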
3616
3617
3618/**
3619 * Adjust the CPU state according to the exception being raised.
3620 *
3621 * @param pCtx The CPU context.
3622 * @param u8Vector The exception that has been raised.
3623 */
3624DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3625{
3626 switch (u8Vector)
3627 {
3628 case X86_XCPT_DB:
3629 pCtx->dr[7] &= ~X86_DR7_GD;
3630 break;
3631 /** @todo Read the AMD and Intel exception reference... */
3632 }
3633}
3634
3635
3636/**
3637 * Implements exceptions and interrupts for real mode.
3638 *
3639 * @returns VBox strict status code.
3640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3641 * @param pCtx The CPU context.
3642 * @param cbInstr The number of bytes to offset rIP by in the return
3643 * address.
3644 * @param u8Vector The interrupt / exception vector number.
3645 * @param fFlags The flags.
3646 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3647 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3648 */
3649IEM_STATIC VBOXSTRICTRC
3650iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3651 PCPUMCTX pCtx,
3652 uint8_t cbInstr,
3653 uint8_t u8Vector,
3654 uint32_t fFlags,
3655 uint16_t uErr,
3656 uint64_t uCr2)
3657{
3658 NOREF(uErr); NOREF(uCr2);
3659
3660 /*
3661 * Read the IDT entry.
3662 */
3663 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3664 {
3665 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3666 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3667 }
3668 RTFAR16 Idte;
3669 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3670 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3671 return rcStrict;
3672
3673 /*
3674 * Push the stack frame.
3675 */
3676 uint16_t *pu16Frame;
3677 uint64_t uNewRsp;
3678 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3679 if (rcStrict != VINF_SUCCESS)
3680 return rcStrict;
3681
3682 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3683#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3684 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3685 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3686 fEfl |= UINT16_C(0xf000);
3687#endif
3688 pu16Frame[2] = (uint16_t)fEfl;
3689 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3690 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3691 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3692 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3693 return rcStrict;
3694
3695 /*
3696 * Load the vector address into cs:ip and make exception specific state
3697 * adjustments.
3698 */
3699 pCtx->cs.Sel = Idte.sel;
3700 pCtx->cs.ValidSel = Idte.sel;
3701 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3702 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3703 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3704 pCtx->rip = Idte.off;
3705 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3706 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3707
3708 /** @todo do we actually do this in real mode? */
3709 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3710 iemRaiseXcptAdjustState(pCtx, u8Vector);
3711
3712 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3713}
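/*
 * Illustrative note (not part of the build): the real-mode IVT entry fetched
 * above is just 4 bytes at IDTR.base + vector*4, laid out as offset:segment.
 * For example, vector 0x08 with a zero IDT base reads the RTFAR16 at linear
 * address 0x20, and execution continues at (Idte.sel << 4) + Idte.off.
 */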
3714
3715
3716/**
3717 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3718 *
3719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3720 * @param pSReg Pointer to the segment register.
3721 */
3722IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3723{
3724 pSReg->Sel = 0;
3725 pSReg->ValidSel = 0;
3726 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3727 {
3728 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
3729 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3730 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3731 }
3732 else
3733 {
3734 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3735 /** @todo check this on AMD-V */
3736 pSReg->u64Base = 0;
3737 pSReg->u32Limit = 0;
3738 }
3739}
3740
3741
3742/**
3743 * Loads a segment selector during a task switch in V8086 mode.
3744 *
3745 * @param pSReg Pointer to the segment register.
3746 * @param uSel The selector value to load.
3747 */
3748IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3749{
3750 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3751 pSReg->Sel = uSel;
3752 pSReg->ValidSel = uSel;
3753 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3754 pSReg->u64Base = uSel << 4;
3755 pSReg->u32Limit = 0xffff;
3756 pSReg->Attr.u = 0xf3;
3757}
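/*
 * Worked example for the loading above: uSel=0x1234 gives u64Base=0x12340
 * (selector << 4), u32Limit=0xffff and Attr.u=0xf3, i.e. a present, accessed,
 * read/write data segment with DPL=3, matching the guest segment register
 * checks referenced in Intel spec. 26.3.1.2.
 */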
3758
3759
3760/**
3761 * Loads a NULL data selector into a selector register, both the hidden and
3762 * visible parts, in protected mode.
3763 *
3764 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3765 * @param pSReg Pointer to the segment register.
3766 * @param uRpl The RPL.
3767 */
3768IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3769{
3770 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3771 * data selector in protected mode. */
3772 pSReg->Sel = uRpl;
3773 pSReg->ValidSel = uRpl;
3774 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3775 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3776 {
3777 /* VT-x (Intel 3960x) observed doing something like this. */
3778 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3779 pSReg->u32Limit = UINT32_MAX;
3780 pSReg->u64Base = 0;
3781 }
3782 else
3783 {
3784 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3785 pSReg->u32Limit = 0;
3786 pSReg->u64Base = 0;
3787 }
3788}
3789
3790
3791/**
3792 * Loads a segment selector during a task switch in protected mode.
3793 *
3794 * In this task switch scenario, we would throw \#TS exceptions rather than
3795 * \#GPs.
3796 *
3797 * @returns VBox strict status code.
3798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3799 * @param pSReg Pointer to the segment register.
3800 * @param uSel The new selector value.
3801 *
3802 * @remarks This does _not_ handle CS or SS.
3803 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3804 */
3805IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3806{
3807 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3808
3809 /* Null data selector. */
3810 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3811 {
3812 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3813 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3814 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3815 return VINF_SUCCESS;
3816 }
3817
3818 /* Fetch the descriptor. */
3819 IEMSELDESC Desc;
3820 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3821 if (rcStrict != VINF_SUCCESS)
3822 {
3823 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3824 VBOXSTRICTRC_VAL(rcStrict)));
3825 return rcStrict;
3826 }
3827
3828 /* Must be a data segment or readable code segment. */
3829 if ( !Desc.Legacy.Gen.u1DescType
3830 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3831 {
3832 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3833 Desc.Legacy.Gen.u4Type));
3834 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3835 }
3836
3837 /* Check privileges for data segments and non-conforming code segments. */
3838 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3839 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3840 {
3841 /* The RPL and the new CPL must be less than or equal to the DPL. */
3842 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3843 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3844 {
3845 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3846 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3847 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3848 }
3849 }
3850
3851 /* Is it there? */
3852 if (!Desc.Legacy.Gen.u1Present)
3853 {
3854 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3855 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3856 }
3857
3858 /* The base and limit. */
3859 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3860 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3861
3862 /*
3863 * Ok, everything checked out fine. Now set the accessed bit before
3864 * committing the result into the registers.
3865 */
3866 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3867 {
3868 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3869 if (rcStrict != VINF_SUCCESS)
3870 return rcStrict;
3871 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3872 }
3873
3874 /* Commit */
3875 pSReg->Sel = uSel;
3876 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3877 pSReg->u32Limit = cbLimit;
3878 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3879 pSReg->ValidSel = uSel;
3880 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3881 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3882 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3883
3884 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3885 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3886 return VINF_SUCCESS;
3887}
3888
3889
3890/**
3891 * Performs a task switch.
3892 *
3893 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3894 * caller is responsible for performing the necessary checks (like DPL, TSS
3895 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3896 * reference for JMP, CALL, IRET.
3897 *
3898 * If the task switch is due to a software interrupt or hardware exception,
3899 * the caller is responsible for validating the TSS selector and descriptor. See
3900 * Intel Instruction reference for INT n.
3901 *
3902 * @returns VBox strict status code.
3903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3904 * @param pCtx The CPU context.
3905 * @param enmTaskSwitch What caused this task switch.
3906 * @param uNextEip The EIP effective after the task switch.
3907 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3908 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3909 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3910 * @param SelTSS The TSS selector of the new task.
3911 * @param pNewDescTSS Pointer to the new TSS descriptor.
3912 */
3913IEM_STATIC VBOXSTRICTRC
3914iemTaskSwitch(PVMCPU pVCpu,
3915 PCPUMCTX pCtx,
3916 IEMTASKSWITCH enmTaskSwitch,
3917 uint32_t uNextEip,
3918 uint32_t fFlags,
3919 uint16_t uErr,
3920 uint64_t uCr2,
3921 RTSEL SelTSS,
3922 PIEMSELDESC pNewDescTSS)
3923{
3924 Assert(!IEM_IS_REAL_MODE(pVCpu));
3925 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3926
3927 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3928 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3929 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3930 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3931 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3932
3933 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3934 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3935
3936 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3937 fIsNewTSS386, pCtx->eip, uNextEip));
3938
3939 /* Update CR2 in case it's a page-fault. */
3940 /** @todo This should probably be done much earlier in IEM/PGM. See
3941 * @bugref{5653#c49}. */
3942 if (fFlags & IEM_XCPT_FLAGS_CR2)
3943 pCtx->cr2 = uCr2;
3944
3945 /*
3946 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3947 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3948 */
3949 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3950 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3951 if (uNewTSSLimit < uNewTSSLimitMin)
3952 {
3953 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3954 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3955 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3956 }
3957
3958 /*
3959 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3960 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3961 */
3962 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3963 {
3964 uint32_t const uExitInfo1 = SelTSS;
3965 uint32_t uExitInfo2 = uErr;
3966 switch (enmTaskSwitch)
3967 {
3968 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3969 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3970 default: break;
3971 }
3972 if (fFlags & IEM_XCPT_FLAGS_ERR)
3973 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3974 if (pCtx->eflags.Bits.u1RF)
3975 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3976
3977 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3978 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3979 RT_NOREF2(uExitInfo1, uExitInfo2);
3980 }
3981 /** @todo Nested-VMX task-switch intercept. */
3982
3983 /*
3984 * Check the current TSS limit. The last bytes written to the current TSS during the
3985 * task switch are 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3986 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3987 *
3988 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3989 * end up with smaller than "legal" TSS limits.
3990 */
3991 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3992 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3993 if (uCurTSSLimit < uCurTSSLimitMin)
3994 {
3995 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3996 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3997 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3998 }
3999
4000 /*
4001 * Verify that the new TSS can be accessed and map it. Map only the required contents
4002 * and not the entire TSS.
4003 */
4004 void *pvNewTSS;
4005 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4006 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4007 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4008 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4009 * not perform correct translation if this happens. See Intel spec. 7.2.1
4010 * "Task-State Segment" */
4011 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4012 if (rcStrict != VINF_SUCCESS)
4013 {
4014 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4015 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4016 return rcStrict;
4017 }
4018
4019 /*
4020 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4021 */
4022 uint32_t u32EFlags = pCtx->eflags.u32;
4023 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4024 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4025 {
4026 PX86DESC pDescCurTSS;
4027 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4028 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4029 if (rcStrict != VINF_SUCCESS)
4030 {
4031 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4032 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4033 return rcStrict;
4034 }
4035
4036 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4037 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4038 if (rcStrict != VINF_SUCCESS)
4039 {
4040 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4041 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4042 return rcStrict;
4043 }
4044
4045 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4046 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4047 {
4048 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4049 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4050 u32EFlags &= ~X86_EFL_NT;
4051 }
4052 }
4053
4054 /*
4055 * Save the CPU state into the current TSS.
4056 */
4057 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
4058 if (GCPtrNewTSS == GCPtrCurTSS)
4059 {
4060 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4061 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4062 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
4063 }
4064 if (fIsNewTSS386)
4065 {
4066 /*
4067 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4068 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4069 */
4070 void *pvCurTSS32;
4071 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
4072 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
4073 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4074 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4075 if (rcStrict != VINF_SUCCESS)
4076 {
4077 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4078 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4079 return rcStrict;
4080 }
4081
4082 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
4083 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4084 pCurTSS32->eip = uNextEip;
4085 pCurTSS32->eflags = u32EFlags;
4086 pCurTSS32->eax = pCtx->eax;
4087 pCurTSS32->ecx = pCtx->ecx;
4088 pCurTSS32->edx = pCtx->edx;
4089 pCurTSS32->ebx = pCtx->ebx;
4090 pCurTSS32->esp = pCtx->esp;
4091 pCurTSS32->ebp = pCtx->ebp;
4092 pCurTSS32->esi = pCtx->esi;
4093 pCurTSS32->edi = pCtx->edi;
4094 pCurTSS32->es = pCtx->es.Sel;
4095 pCurTSS32->cs = pCtx->cs.Sel;
4096 pCurTSS32->ss = pCtx->ss.Sel;
4097 pCurTSS32->ds = pCtx->ds.Sel;
4098 pCurTSS32->fs = pCtx->fs.Sel;
4099 pCurTSS32->gs = pCtx->gs.Sel;
4100
4101 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4102 if (rcStrict != VINF_SUCCESS)
4103 {
4104 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4105 VBOXSTRICTRC_VAL(rcStrict)));
4106 return rcStrict;
4107 }
4108 }
4109 else
4110 {
4111 /*
4112 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4113 */
4114 void *pvCurTSS16;
4115 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4116 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4117 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4118 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4119 if (rcStrict != VINF_SUCCESS)
4120 {
4121 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4122 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4123 return rcStrict;
4124 }
4125
4126 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
4127 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4128 pCurTSS16->ip = uNextEip;
4129 pCurTSS16->flags = u32EFlags;
4130 pCurTSS16->ax = pCtx->ax;
4131 pCurTSS16->cx = pCtx->cx;
4132 pCurTSS16->dx = pCtx->dx;
4133 pCurTSS16->bx = pCtx->bx;
4134 pCurTSS16->sp = pCtx->sp;
4135 pCurTSS16->bp = pCtx->bp;
4136 pCurTSS16->si = pCtx->si;
4137 pCurTSS16->di = pCtx->di;
4138 pCurTSS16->es = pCtx->es.Sel;
4139 pCurTSS16->cs = pCtx->cs.Sel;
4140 pCurTSS16->ss = pCtx->ss.Sel;
4141 pCurTSS16->ds = pCtx->ds.Sel;
4142
4143 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4144 if (rcStrict != VINF_SUCCESS)
4145 {
4146 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4147 VBOXSTRICTRC_VAL(rcStrict)));
4148 return rcStrict;
4149 }
4150 }
4151
4152 /*
4153 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4154 */
4155 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4156 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4157 {
4158 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4159 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4160 pNewTSS->selPrev = pCtx->tr.Sel;
4161 }
4162
4163 /*
4164 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4165 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4166 */
4167 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4168 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4169 bool fNewDebugTrap;
4170 if (fIsNewTSS386)
4171 {
4172 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4173 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4174 uNewEip = pNewTSS32->eip;
4175 uNewEflags = pNewTSS32->eflags;
4176 uNewEax = pNewTSS32->eax;
4177 uNewEcx = pNewTSS32->ecx;
4178 uNewEdx = pNewTSS32->edx;
4179 uNewEbx = pNewTSS32->ebx;
4180 uNewEsp = pNewTSS32->esp;
4181 uNewEbp = pNewTSS32->ebp;
4182 uNewEsi = pNewTSS32->esi;
4183 uNewEdi = pNewTSS32->edi;
4184 uNewES = pNewTSS32->es;
4185 uNewCS = pNewTSS32->cs;
4186 uNewSS = pNewTSS32->ss;
4187 uNewDS = pNewTSS32->ds;
4188 uNewFS = pNewTSS32->fs;
4189 uNewGS = pNewTSS32->gs;
4190 uNewLdt = pNewTSS32->selLdt;
4191 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4192 }
4193 else
4194 {
4195 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4196 uNewCr3 = 0;
4197 uNewEip = pNewTSS16->ip;
4198 uNewEflags = pNewTSS16->flags;
4199 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4200 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4201 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4202 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4203 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4204 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4205 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4206 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4207 uNewES = pNewTSS16->es;
4208 uNewCS = pNewTSS16->cs;
4209 uNewSS = pNewTSS16->ss;
4210 uNewDS = pNewTSS16->ds;
4211 uNewFS = 0;
4212 uNewGS = 0;
4213 uNewLdt = pNewTSS16->selLdt;
4214 fNewDebugTrap = false;
4215 }
4216
4217 if (GCPtrNewTSS == GCPtrCurTSS)
4218 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4219 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4220
4221 /*
4222 * We're done accessing the new TSS.
4223 */
4224 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4225 if (rcStrict != VINF_SUCCESS)
4226 {
4227 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4228 return rcStrict;
4229 }
4230
4231 /*
4232 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4233 */
4234 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4235 {
4236 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4237 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4238 if (rcStrict != VINF_SUCCESS)
4239 {
4240 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4241 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4242 return rcStrict;
4243 }
4244
4245 /* Check that the descriptor indicates the new TSS is available (not busy). */
4246 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4247 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4248 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4249
4250 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4251 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4252 if (rcStrict != VINF_SUCCESS)
4253 {
4254 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4255 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4256 return rcStrict;
4257 }
4258 }
4259
4260 /*
4261 * From this point on, we're technically in the new task. We will defer exceptions
4262 * until the completion of the task switch but before executing any instructions in the new task.
4263 */
4264 pCtx->tr.Sel = SelTSS;
4265 pCtx->tr.ValidSel = SelTSS;
4266 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4267 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4268 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4269 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4270 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4271
4272 /* Set the busy bit in TR. */
4273 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4274 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4275 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4276 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4277 {
4278 uNewEflags |= X86_EFL_NT;
4279 }
4280
4281 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4282 pCtx->cr0 |= X86_CR0_TS;
4283 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4284
4285 pCtx->eip = uNewEip;
4286 pCtx->eax = uNewEax;
4287 pCtx->ecx = uNewEcx;
4288 pCtx->edx = uNewEdx;
4289 pCtx->ebx = uNewEbx;
4290 pCtx->esp = uNewEsp;
4291 pCtx->ebp = uNewEbp;
4292 pCtx->esi = uNewEsi;
4293 pCtx->edi = uNewEdi;
4294
4295 uNewEflags &= X86_EFL_LIVE_MASK;
4296 uNewEflags |= X86_EFL_RA1_MASK;
4297 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4298
4299 /*
4300 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4301 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4302 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4303 */
4304 pCtx->es.Sel = uNewES;
4305 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4306
4307 pCtx->cs.Sel = uNewCS;
4308 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4309
4310 pCtx->ss.Sel = uNewSS;
4311 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4312
4313 pCtx->ds.Sel = uNewDS;
4314 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4315
4316 pCtx->fs.Sel = uNewFS;
4317 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4318
4319 pCtx->gs.Sel = uNewGS;
4320 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4321 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4322
4323 pCtx->ldtr.Sel = uNewLdt;
4324 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4325 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4326 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4327
4328 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4329 {
4330 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4331 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4332 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4333 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4334 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4335 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4336 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4337 }
4338
4339 /*
4340 * Switch CR3 for the new task.
4341 */
4342 if ( fIsNewTSS386
4343 && (pCtx->cr0 & X86_CR0_PG))
4344 {
4345 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4346 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4347 {
4348 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4349 AssertRCSuccessReturn(rc, rc);
4350 }
4351 else
4352 pCtx->cr3 = uNewCr3;
4353
4354 /* Inform PGM. */
4355 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4356 {
4357 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4358 AssertRCReturn(rc, rc);
4359 /* ignore informational status codes */
4360 }
4361 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4362 }
4363
4364 /*
4365 * Switch LDTR for the new task.
4366 */
4367 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4368 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4369 else
4370 {
4371 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4372
4373 IEMSELDESC DescNewLdt;
4374 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4375 if (rcStrict != VINF_SUCCESS)
4376 {
4377 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4378 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4379 return rcStrict;
4380 }
4381 if ( !DescNewLdt.Legacy.Gen.u1Present
4382 || DescNewLdt.Legacy.Gen.u1DescType
4383 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4384 {
4385 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4386 uNewLdt, DescNewLdt.Legacy.u));
4387 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4388 }
4389
4390 pCtx->ldtr.ValidSel = uNewLdt;
4391 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4392 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4393 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4394 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4395 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4396 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4397 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4398 }
4399
4400 IEMSELDESC DescSS;
4401 if (IEM_IS_V86_MODE(pVCpu))
4402 {
4403 pVCpu->iem.s.uCpl = 3;
4404 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4405 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4406 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4407 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4408 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4409 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4410
4411 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4412 DescSS.Legacy.u = 0;
4413 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4414 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4415 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4416 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4417 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4418 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4419 DescSS.Legacy.Gen.u2Dpl = 3;
4420 }
4421 else
4422 {
4423 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4424
4425 /*
4426 * Load the stack segment for the new task.
4427 */
4428 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4429 {
4430 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4431 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4432 }
4433
4434 /* Fetch the descriptor. */
4435 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4436 if (rcStrict != VINF_SUCCESS)
4437 {
4438 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4439 VBOXSTRICTRC_VAL(rcStrict)));
4440 return rcStrict;
4441 }
4442
4443 /* SS must be a data segment and writable. */
4444 if ( !DescSS.Legacy.Gen.u1DescType
4445 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4446 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4447 {
4448 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4449 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4450 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4451 }
4452
4453 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4454 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4455 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4456 {
4457 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4458 uNewCpl));
4459 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4460 }
4461
4462 /* Is it there? */
4463 if (!DescSS.Legacy.Gen.u1Present)
4464 {
4465 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4466 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4467 }
4468
4469 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4470 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4471
4472 /* Set the accessed bit before committing the result into SS. */
4473 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4474 {
4475 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4476 if (rcStrict != VINF_SUCCESS)
4477 return rcStrict;
4478 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4479 }
4480
4481 /* Commit SS. */
4482 pCtx->ss.Sel = uNewSS;
4483 pCtx->ss.ValidSel = uNewSS;
4484 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4485 pCtx->ss.u32Limit = cbLimit;
4486 pCtx->ss.u64Base = u64Base;
4487 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4488 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4489
4490 /* CPL has changed, update IEM before loading rest of segments. */
4491 pVCpu->iem.s.uCpl = uNewCpl;
4492
4493 /*
4494 * Load the data segments for the new task.
4495 */
4496 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4497 if (rcStrict != VINF_SUCCESS)
4498 return rcStrict;
4499 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4500 if (rcStrict != VINF_SUCCESS)
4501 return rcStrict;
4502 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4503 if (rcStrict != VINF_SUCCESS)
4504 return rcStrict;
4505 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4506 if (rcStrict != VINF_SUCCESS)
4507 return rcStrict;
4508
4509 /*
4510 * Load the code segment for the new task.
4511 */
4512 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4513 {
4514 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4515 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4516 }
4517
4518 /* Fetch the descriptor. */
4519 IEMSELDESC DescCS;
4520 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4521 if (rcStrict != VINF_SUCCESS)
4522 {
4523 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4524 return rcStrict;
4525 }
4526
4527 /* CS must be a code segment. */
4528 if ( !DescCS.Legacy.Gen.u1DescType
4529 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4530 {
4531 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4532 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4533 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4534 }
4535
4536 /* For conforming CS, DPL must be less than or equal to the RPL. */
4537 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4538 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4539 {
4540 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4541 DescCS.Legacy.Gen.u2Dpl));
4542 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4543 }
4544
4545 /* For non-conforming CS, DPL must match RPL. */
4546 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4547 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4548 {
4549 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4550 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4551 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4552 }
4553
4554 /* Is it there? */
4555 if (!DescCS.Legacy.Gen.u1Present)
4556 {
4557 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4558 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4559 }
4560
4561 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4562 u64Base = X86DESC_BASE(&DescCS.Legacy);
4563
4564 /* Set the accessed bit before committing the result into CS. */
4565 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4566 {
4567 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4568 if (rcStrict != VINF_SUCCESS)
4569 return rcStrict;
4570 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4571 }
4572
4573 /* Commit CS. */
4574 pCtx->cs.Sel = uNewCS;
4575 pCtx->cs.ValidSel = uNewCS;
4576 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4577 pCtx->cs.u32Limit = cbLimit;
4578 pCtx->cs.u64Base = u64Base;
4579 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4580 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4581 }
4582
4583 /** @todo Debug trap. */
4584 if (fIsNewTSS386 && fNewDebugTrap)
4585 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4586
4587 /*
4588 * Construct the error code masks based on what caused this task switch.
4589 * See Intel Instruction reference for INT.
4590 */
4591 uint16_t uExt;
4592 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4593 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4594 {
4595 uExt = 1;
4596 }
4597 else
4598 uExt = 0;
4599
4600 /*
4601 * Push any error code on to the new stack.
4602 */
4603 if (fFlags & IEM_XCPT_FLAGS_ERR)
4604 {
4605 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4606 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4607 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4608
4609 /* Check that there is sufficient space on the stack. */
4610 /** @todo Factor out segment limit checking for normal/expand down segments
4611 * into a separate function. */
4612 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4613 {
4614 if ( pCtx->esp - 1 > cbLimitSS
4615 || pCtx->esp < cbStackFrame)
4616 {
4617 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4618 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4619 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4620 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4621 }
4622 }
4623 else
4624 {
4625 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4626 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4627 {
4628 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4629 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4630 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4631 }
4632 }
4633
4634
4635 if (fIsNewTSS386)
4636 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4637 else
4638 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4639 if (rcStrict != VINF_SUCCESS)
4640 {
4641 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4642 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4643 return rcStrict;
4644 }
4645 }
4646
4647 /* Check the new EIP against the new CS limit. */
4648 if (pCtx->eip > pCtx->cs.u32Limit)
4649 {
4650 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4651 pCtx->eip, pCtx->cs.u32Limit));
4652 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4653 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4654 }
4655
4656 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4657 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4658}
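/*
 * Illustrative sketch (hypothetical and simplified, not part of the build):
 * how a task-gate dispatch path might invoke iemTaskSwitch after validating
 * the TSS selector and fetching its descriptor.  The helper and argument
 * names mirror the ones used above, but the real caller performs additional
 * checks before getting here.
 *
 *     IEMSELDESC DescTSS;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP);
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, uNextEip,
 *                                  fFlags, uErr, uCr2, SelTSS, &DescTSS);
 */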
4659
4660
4661/**
4662 * Implements exceptions and interrupts for protected mode.
4663 *
4664 * @returns VBox strict status code.
4665 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4666 * @param pCtx The CPU context.
4667 * @param cbInstr The number of bytes to offset rIP by in the return
4668 * address.
4669 * @param u8Vector The interrupt / exception vector number.
4670 * @param fFlags The flags.
4671 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4672 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4673 */
4674IEM_STATIC VBOXSTRICTRC
4675iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4676 PCPUMCTX pCtx,
4677 uint8_t cbInstr,
4678 uint8_t u8Vector,
4679 uint32_t fFlags,
4680 uint16_t uErr,
4681 uint64_t uCr2)
4682{
4683 /*
4684 * Read the IDT entry.
4685 */
4686 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4687 {
4688 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4689 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4690 }
4691 X86DESC Idte;
4692 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4693 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4694 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4695 return rcStrict;
4696 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4697 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4698 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4699
4700 /*
4701 * Check the descriptor type, DPL and such.
4702 * ASSUMES this is done in the same order as described for call-gate calls.
4703 */
4704 if (Idte.Gate.u1DescType)
4705 {
4706 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4707 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4708 }
4709 bool fTaskGate = false;
4710 uint8_t f32BitGate = true;
4711 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4712 switch (Idte.Gate.u4Type)
4713 {
4714 case X86_SEL_TYPE_SYS_UNDEFINED:
4715 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4716 case X86_SEL_TYPE_SYS_LDT:
4717 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4718 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4719 case X86_SEL_TYPE_SYS_UNDEFINED2:
4720 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4721 case X86_SEL_TYPE_SYS_UNDEFINED3:
4722 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4723 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4724 case X86_SEL_TYPE_SYS_UNDEFINED4:
4725 {
4726 /** @todo check what actually happens when the type is wrong...
4727 * esp. call gates. */
4728 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4729 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4730 }
4731
4732 case X86_SEL_TYPE_SYS_286_INT_GATE:
4733 f32BitGate = false;
4734 RT_FALL_THRU();
4735 case X86_SEL_TYPE_SYS_386_INT_GATE:
4736 fEflToClear |= X86_EFL_IF;
4737 break;
4738
4739 case X86_SEL_TYPE_SYS_TASK_GATE:
4740 fTaskGate = true;
4741#ifndef IEM_IMPLEMENTS_TASKSWITCH
4742 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4743#endif
4744 break;
4745
 4746 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
 4747 f32BitGate = false;
      RT_FALL_THRU();
 4748 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4749 break;
4750
4751 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4752 }
4753
4754 /* Check DPL against CPL if applicable. */
4755 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4756 {
4757 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4758 {
4759 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4760 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4761 }
4762 }
4763
4764 /* Is it there? */
4765 if (!Idte.Gate.u1Present)
4766 {
4767 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4768 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4769 }
4770
4771 /* Is it a task-gate? */
4772 if (fTaskGate)
4773 {
4774 /*
4775 * Construct the error code masks based on what caused this task switch.
4776 * See Intel Instruction reference for INT.
4777 */
4778 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4779 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4780 RTSEL SelTSS = Idte.Gate.u16Sel;
4781
4782 /*
4783 * Fetch the TSS descriptor in the GDT.
4784 */
4785 IEMSELDESC DescTSS;
4786 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4787 if (rcStrict != VINF_SUCCESS)
4788 {
4789 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4790 VBOXSTRICTRC_VAL(rcStrict)));
4791 return rcStrict;
4792 }
4793
4794 /* The TSS descriptor must be a system segment and be available (not busy). */
4795 if ( DescTSS.Legacy.Gen.u1DescType
4796 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4797 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4798 {
4799 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4800 u8Vector, SelTSS, DescTSS.Legacy.au64));
4801 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4802 }
4803
4804 /* The TSS must be present. */
4805 if (!DescTSS.Legacy.Gen.u1Present)
4806 {
4807 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4808 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4809 }
4810
4811 /* Do the actual task switch. */
4812 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4813 }
4814
4815 /* A null CS is bad. */
4816 RTSEL NewCS = Idte.Gate.u16Sel;
4817 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4818 {
4819 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4820 return iemRaiseGeneralProtectionFault0(pVCpu);
4821 }
4822
4823 /* Fetch the descriptor for the new CS. */
4824 IEMSELDESC DescCS;
4825 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4826 if (rcStrict != VINF_SUCCESS)
4827 {
4828 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4829 return rcStrict;
4830 }
4831
4832 /* Must be a code segment. */
4833 if (!DescCS.Legacy.Gen.u1DescType)
4834 {
4835 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4836 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4837 }
4838 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4839 {
4840 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4841 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4842 }
4843
4844 /* Don't allow lowering the privilege level. */
4845 /** @todo Does the lowering of privileges apply to software interrupts
4846 * only? This has bearings on the more-privileged or
4847 * same-privilege stack behavior further down. A testcase would
4848 * be nice. */
4849 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4850 {
4851 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4852 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4853 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4854 }
4855
4856 /* Make sure the selector is present. */
4857 if (!DescCS.Legacy.Gen.u1Present)
4858 {
4859 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4860 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4861 }
4862
4863 /* Check the new EIP against the new CS limit. */
4864 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4865 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4866 ? Idte.Gate.u16OffsetLow
4867 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4868 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4869 if (uNewEip > cbLimitCS)
4870 {
4871 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4872 u8Vector, uNewEip, cbLimitCS, NewCS));
4873 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4874 }
4875 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4876
4877 /* Calc the flag image to push. */
4878 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4879 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4880 fEfl &= ~X86_EFL_RF;
4881 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4882 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4883
4884 /* From V8086 mode only go to CPL 0. */
4885 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4886 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4887 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4888 {
4889 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4890 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4891 }
4892
4893 /*
4894 * If the privilege level changes, we need to get a new stack from the TSS.
4895 * This in turns means validating the new SS and ESP...
4896 */
4897 if (uNewCpl != pVCpu->iem.s.uCpl)
4898 {
4899 RTSEL NewSS;
4900 uint32_t uNewEsp;
4901 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4902 if (rcStrict != VINF_SUCCESS)
4903 return rcStrict;
4904
4905 IEMSELDESC DescSS;
4906 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4907 if (rcStrict != VINF_SUCCESS)
4908 return rcStrict;
4909 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4910 if (!DescSS.Legacy.Gen.u1DefBig)
4911 {
4912 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4913 uNewEsp = (uint16_t)uNewEsp;
4914 }
4915
4916 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4917
4918 /* Check that there is sufficient space for the stack frame. */
4919 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4920 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4921 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4922 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
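         /* I.e. SS, ESP, EFLAGS, CS, EIP plus an optional error code, and when
            leaving V8086 mode also GS, FS, DS and ES: 10-20 bytes through a
            16-bit gate, 20-40 bytes through a 32-bit gate. */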
4923
4924 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4925 {
4926 if ( uNewEsp - 1 > cbLimitSS
4927 || uNewEsp < cbStackFrame)
4928 {
4929 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4930 u8Vector, NewSS, uNewEsp, cbStackFrame));
4931 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4932 }
4933 }
4934 else
4935 {
4936 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4937 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4938 {
4939 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4940 u8Vector, NewSS, uNewEsp, cbStackFrame));
4941 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4942 }
4943 }
4944
4945 /*
4946 * Start making changes.
4947 */
4948
4949 /* Set the new CPL so that stack accesses use it. */
4950 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4951 pVCpu->iem.s.uCpl = uNewCpl;
4952
4953 /* Create the stack frame. */
4954 RTPTRUNION uStackFrame;
4955 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4956 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4957 if (rcStrict != VINF_SUCCESS)
4958 return rcStrict;
4959 void * const pvStackFrame = uStackFrame.pv;
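         /* The frame is written from the lowest address upwards: [error code,]
            EIP, CS, EFLAGS, old ESP, old SS, and when leaving V8086 mode also
            the old ES, DS, FS and GS values. */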
4960 if (f32BitGate)
4961 {
4962 if (fFlags & IEM_XCPT_FLAGS_ERR)
4963 *uStackFrame.pu32++ = uErr;
4964 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4965 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4966 uStackFrame.pu32[2] = fEfl;
4967 uStackFrame.pu32[3] = pCtx->esp;
4968 uStackFrame.pu32[4] = pCtx->ss.Sel;
4969 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4970 if (fEfl & X86_EFL_VM)
4971 {
4972 uStackFrame.pu32[1] = pCtx->cs.Sel;
4973 uStackFrame.pu32[5] = pCtx->es.Sel;
4974 uStackFrame.pu32[6] = pCtx->ds.Sel;
4975 uStackFrame.pu32[7] = pCtx->fs.Sel;
4976 uStackFrame.pu32[8] = pCtx->gs.Sel;
4977 }
4978 }
4979 else
4980 {
4981 if (fFlags & IEM_XCPT_FLAGS_ERR)
4982 *uStackFrame.pu16++ = uErr;
4983 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4984 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4985 uStackFrame.pu16[2] = fEfl;
4986 uStackFrame.pu16[3] = pCtx->sp;
4987 uStackFrame.pu16[4] = pCtx->ss.Sel;
4988 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4989 if (fEfl & X86_EFL_VM)
4990 {
4991 uStackFrame.pu16[1] = pCtx->cs.Sel;
4992 uStackFrame.pu16[5] = pCtx->es.Sel;
4993 uStackFrame.pu16[6] = pCtx->ds.Sel;
4994 uStackFrame.pu16[7] = pCtx->fs.Sel;
4995 uStackFrame.pu16[8] = pCtx->gs.Sel;
4996 }
4997 }
4998 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4999 if (rcStrict != VINF_SUCCESS)
5000 return rcStrict;
5001
5002 /* Mark the selectors 'accessed' (hope this is the correct time). */
 5003 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5004 * after pushing the stack frame? (Write protect the gdt + stack to
5005 * find out.) */
5006 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5007 {
5008 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5009 if (rcStrict != VINF_SUCCESS)
5010 return rcStrict;
5011 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5012 }
5013
5014 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5015 {
5016 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5017 if (rcStrict != VINF_SUCCESS)
5018 return rcStrict;
5019 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5020 }
5021
5022 /*
 5023 * Start committing the register changes (joins with the DPL=CPL branch).
5024 */
5025 pCtx->ss.Sel = NewSS;
5026 pCtx->ss.ValidSel = NewSS;
5027 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5028 pCtx->ss.u32Limit = cbLimitSS;
5029 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5030 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5031 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5032 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5033 * SP is loaded).
5034 * Need to check the other combinations too:
5035 * - 16-bit TSS, 32-bit handler
5036 * - 32-bit TSS, 16-bit handler */
5037 if (!pCtx->ss.Attr.n.u1DefBig)
5038 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
5039 else
5040 pCtx->rsp = uNewEsp - cbStackFrame;
5041
5042 if (fEfl & X86_EFL_VM)
5043 {
5044 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
5045 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
5046 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
5047 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
5048 }
5049 }
5050 /*
5051 * Same privilege, no stack change and smaller stack frame.
5052 */
5053 else
5054 {
5055 uint64_t uNewRsp;
5056 RTPTRUNION uStackFrame;
5057 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
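         /* I.e. just EIP, CS, EFLAGS and an optional error code: 6 or 8 bytes
            through a 16-bit gate, 12 or 16 bytes through a 32-bit one. */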
5058 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5059 if (rcStrict != VINF_SUCCESS)
5060 return rcStrict;
5061 void * const pvStackFrame = uStackFrame.pv;
5062
5063 if (f32BitGate)
5064 {
5065 if (fFlags & IEM_XCPT_FLAGS_ERR)
5066 *uStackFrame.pu32++ = uErr;
5067 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5068 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5069 uStackFrame.pu32[2] = fEfl;
5070 }
5071 else
5072 {
5073 if (fFlags & IEM_XCPT_FLAGS_ERR)
5074 *uStackFrame.pu16++ = uErr;
5075 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5076 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5077 uStackFrame.pu16[2] = fEfl;
5078 }
5079 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5080 if (rcStrict != VINF_SUCCESS)
5081 return rcStrict;
5082
5083 /* Mark the CS selector as 'accessed'. */
5084 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5085 {
5086 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5087 if (rcStrict != VINF_SUCCESS)
5088 return rcStrict;
5089 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5090 }
5091
5092 /*
5093 * Start committing the register changes (joins with the other branch).
5094 */
5095 pCtx->rsp = uNewRsp;
5096 }
5097
5098 /* ... register committing continues. */
5099 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5100 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5101 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5102 pCtx->cs.u32Limit = cbLimitCS;
5103 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5104 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5105
5106 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5107 fEfl &= ~fEflToClear;
5108 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5109
5110 if (fFlags & IEM_XCPT_FLAGS_CR2)
5111 pCtx->cr2 = uCr2;
5112
5113 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5114 iemRaiseXcptAdjustState(pCtx, u8Vector);
5115
5116 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5117}
5118
5119
5120/**
5121 * Implements exceptions and interrupts for long mode.
5122 *
5123 * @returns VBox strict status code.
5124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5125 * @param pCtx The CPU context.
5126 * @param cbInstr The number of bytes to offset rIP by in the return
5127 * address.
5128 * @param u8Vector The interrupt / exception vector number.
5129 * @param fFlags The flags.
5130 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5131 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5132 */
5133IEM_STATIC VBOXSTRICTRC
5134iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5135 PCPUMCTX pCtx,
5136 uint8_t cbInstr,
5137 uint8_t u8Vector,
5138 uint32_t fFlags,
5139 uint16_t uErr,
5140 uint64_t uCr2)
5141{
5142 /*
5143 * Read the IDT entry.
5144 */
5145 uint16_t offIdt = (uint16_t)u8Vector << 4;
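    /* Long mode IDT entries are 16 bytes wide; the second qword holds bits 63:32
       of the target offset, hence the shift by 4 and the two 8-byte fetches below. */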
5146 if (pCtx->idtr.cbIdt < offIdt + 7)
5147 {
5148 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
5149 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5150 }
5151 X86DESC64 Idte;
5152 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
5153 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5154 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
5155 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5156 return rcStrict;
5157 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5158 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5159 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5160
5161 /*
5162 * Check the descriptor type, DPL and such.
5163 * ASSUMES this is done in the same order as described for call-gate calls.
5164 */
5165 if (Idte.Gate.u1DescType)
5166 {
5167 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5168 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5169 }
5170 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5171 switch (Idte.Gate.u4Type)
5172 {
5173 case AMD64_SEL_TYPE_SYS_INT_GATE:
5174 fEflToClear |= X86_EFL_IF;
5175 break;
5176 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5177 break;
5178
5179 default:
5180 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5181 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5182 }
5183
5184 /* Check DPL against CPL if applicable. */
5185 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5186 {
5187 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5188 {
5189 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5190 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5191 }
5192 }
5193
5194 /* Is it there? */
5195 if (!Idte.Gate.u1Present)
5196 {
5197 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5198 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5199 }
5200
5201 /* A null CS is bad. */
5202 RTSEL NewCS = Idte.Gate.u16Sel;
5203 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5204 {
5205 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5206 return iemRaiseGeneralProtectionFault0(pVCpu);
5207 }
5208
5209 /* Fetch the descriptor for the new CS. */
5210 IEMSELDESC DescCS;
5211 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5212 if (rcStrict != VINF_SUCCESS)
5213 {
5214 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5215 return rcStrict;
5216 }
5217
5218 /* Must be a 64-bit code segment. */
5219 if (!DescCS.Long.Gen.u1DescType)
5220 {
5221 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5222 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5223 }
5224 if ( !DescCS.Long.Gen.u1Long
5225 || DescCS.Long.Gen.u1DefBig
5226 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5227 {
5228 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5229 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5230 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5231 }
5232
5233 /* Don't allow lowering the privilege level. For non-conforming CS
5234 selectors, the CS.DPL sets the privilege level the trap/interrupt
5235 handler runs at. For conforming CS selectors, the CPL remains
5236 unchanged, but the CS.DPL must be <= CPL. */
5237 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5238 * when CPU in Ring-0. Result \#GP? */
5239 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5240 {
5241 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5242 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5243 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5244 }
5245
5246
5247 /* Make sure the selector is present. */
5248 if (!DescCS.Legacy.Gen.u1Present)
5249 {
5250 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5251 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5252 }
5253
5254 /* Check that the new RIP is canonical. */
5255 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5256 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5257 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5258 if (!IEM_IS_CANONICAL(uNewRip))
5259 {
5260 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5261 return iemRaiseGeneralProtectionFault0(pVCpu);
5262 }
5263
5264 /*
5265 * If the privilege level changes or if the IST isn't zero, we need to get
5266 * a new stack from the TSS.
5267 */
5268 uint64_t uNewRsp;
5269 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5270 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5271 if ( uNewCpl != pVCpu->iem.s.uCpl
5272 || Idte.Gate.u3IST != 0)
5273 {
5274 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5275 if (rcStrict != VINF_SUCCESS)
5276 return rcStrict;
5277 }
5278 else
5279 uNewRsp = pCtx->rsp;
5280 uNewRsp &= ~(uint64_t)0xf;
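    /* In 64-bit mode the stack pointer is aligned down to a 16-byte boundary
       before the frame is pushed, whether or not the stack was switched. */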
5281
5282 /*
5283 * Calc the flag image to push.
5284 */
5285 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5286 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5287 fEfl &= ~X86_EFL_RF;
5288 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5289 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5290
5291 /*
5292 * Start making changes.
5293 */
5294 /* Set the new CPL so that stack accesses use it. */
5295 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5296 pVCpu->iem.s.uCpl = uNewCpl;
5297
5298 /* Create the stack frame. */
5299 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
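    /* The long mode frame always holds five qwords (SS, RSP, RFLAGS, CS and RIP),
       plus one more qword for the error code when applicable. */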
5300 RTPTRUNION uStackFrame;
5301 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5302 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5303 if (rcStrict != VINF_SUCCESS)
5304 return rcStrict;
5305 void * const pvStackFrame = uStackFrame.pv;
5306
5307 if (fFlags & IEM_XCPT_FLAGS_ERR)
5308 *uStackFrame.pu64++ = uErr;
5309 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5310 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5311 uStackFrame.pu64[2] = fEfl;
5312 uStackFrame.pu64[3] = pCtx->rsp;
5313 uStackFrame.pu64[4] = pCtx->ss.Sel;
5314 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5315 if (rcStrict != VINF_SUCCESS)
5316 return rcStrict;
5317
 5318 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
 5319 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5320 * after pushing the stack frame? (Write protect the gdt + stack to
5321 * find out.) */
5322 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5323 {
5324 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5325 if (rcStrict != VINF_SUCCESS)
5326 return rcStrict;
5327 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5328 }
5329
5330 /*
 5331 * Start committing the register changes.
5332 */
5333 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5334 * hidden registers when interrupting 32-bit or 16-bit code! */
5335 if (uNewCpl != uOldCpl)
5336 {
5337 pCtx->ss.Sel = 0 | uNewCpl;
5338 pCtx->ss.ValidSel = 0 | uNewCpl;
5339 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5340 pCtx->ss.u32Limit = UINT32_MAX;
5341 pCtx->ss.u64Base = 0;
5342 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5343 }
5344 pCtx->rsp = uNewRsp - cbStackFrame;
5345 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5346 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5347 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5348 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5349 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5350 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5351 pCtx->rip = uNewRip;
5352
5353 fEfl &= ~fEflToClear;
5354 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5355
5356 if (fFlags & IEM_XCPT_FLAGS_CR2)
5357 pCtx->cr2 = uCr2;
5358
5359 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5360 iemRaiseXcptAdjustState(pCtx, u8Vector);
5361
5362 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5363}
5364
5365
5366/**
5367 * Implements exceptions and interrupts.
5368 *
 5369 * All exceptions and interrupts go through this function!
5370 *
5371 * @returns VBox strict status code.
5372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5373 * @param cbInstr The number of bytes to offset rIP by in the return
5374 * address.
5375 * @param u8Vector The interrupt / exception vector number.
5376 * @param fFlags The flags.
5377 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5378 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5379 */
5380DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5381iemRaiseXcptOrInt(PVMCPU pVCpu,
5382 uint8_t cbInstr,
5383 uint8_t u8Vector,
5384 uint32_t fFlags,
5385 uint16_t uErr,
5386 uint64_t uCr2)
5387{
5388 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5389#ifdef IN_RING0
5390 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5391 AssertRCReturn(rc, rc);
5392#endif
5393
5394#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5395 /*
5396 * Flush prefetch buffer
5397 */
5398 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5399#endif
5400
5401 /*
5402 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5403 */
5404 if ( pCtx->eflags.Bits.u1VM
5405 && pCtx->eflags.Bits.u2IOPL != 3
5406 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5407 && (pCtx->cr0 & X86_CR0_PE) )
5408 {
5409 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5410 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5411 u8Vector = X86_XCPT_GP;
5412 uErr = 0;
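        /* When IOPL < 3 in V8086 mode the INT n instruction is IOPL sensitive and
           raises #GP(0) instead of vectoring through the IDT. */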
5413 }
5414#ifdef DBGFTRACE_ENABLED
5415 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5416 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5417 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5418#endif
5419
5420#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5421 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
5422 {
5423 /*
5424 * If the event is being injected as part of VMRUN, it isn't subject to event
5425 * intercepts in the nested-guest. However, secondary exceptions that occur
5426 * during injection of any event -are- subject to exception intercepts.
5427 * See AMD spec. 15.20 "Event Injection".
5428 */
5429 if (!pCtx->hwvirt.svm.fInterceptEvents)
5430 pCtx->hwvirt.svm.fInterceptEvents = 1;
5431 else
5432 {
5433 /*
5434 * Check and handle if the event being raised is intercepted.
5435 */
5436 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
5437 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5438 return rcStrict0;
5439 }
5440 }
5441#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5442
5443 /*
5444 * Do recursion accounting.
5445 */
5446 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5447 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5448 if (pVCpu->iem.s.cXcptRecursions == 0)
5449 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5450 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5451 else
5452 {
5453 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5454 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5455 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5456
5457 if (pVCpu->iem.s.cXcptRecursions >= 3)
5458 {
5459#ifdef DEBUG_bird
5460 AssertFailed();
5461#endif
5462 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5463 }
5464
5465 /*
5466 * Evaluate the sequence of recurring events.
5467 */
5468 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5469 NULL /* pXcptRaiseInfo */);
5470 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5471 { /* likely */ }
5472 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5473 {
5474 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5475 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5476 u8Vector = X86_XCPT_DF;
5477 uErr = 0;
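            /* The error code of a double fault is always zero. */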
5478 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5479 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5480 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5481 }
5482 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5483 {
5484 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5485 return iemInitiateCpuShutdown(pVCpu);
5486 }
5487 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5488 {
5489 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5490 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5491 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
5492 return VERR_EM_GUEST_CPU_HANG;
5493 }
5494 else
5495 {
5496 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5497 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5498 return VERR_IEM_IPE_9;
5499 }
5500
5501 /*
 5502 * The 'EXT' bit is set when an exception occurs during delivery of an external
 5503 * event (such as an interrupt or earlier exception)[1]. The privileged software
 5504 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
 5505 * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
5506 *
5507 * [1] - Intel spec. 6.13 "Error Code"
5508 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5509 * [3] - Intel Instruction reference for INT n.
5510 */
5511 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5512 && (fFlags & IEM_XCPT_FLAGS_ERR)
5513 && u8Vector != X86_XCPT_PF
5514 && u8Vector != X86_XCPT_DF)
5515 {
5516 uErr |= X86_TRAP_ERR_EXTERNAL;
5517 }
5518 }
5519
5520 pVCpu->iem.s.cXcptRecursions++;
5521 pVCpu->iem.s.uCurXcpt = u8Vector;
5522 pVCpu->iem.s.fCurXcpt = fFlags;
5523 pVCpu->iem.s.uCurXcptErr = uErr;
5524 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5525
5526 /*
5527 * Extensive logging.
5528 */
5529#if defined(LOG_ENABLED) && defined(IN_RING3)
5530 if (LogIs3Enabled())
5531 {
5532 PVM pVM = pVCpu->CTX_SUFF(pVM);
5533 char szRegs[4096];
5534 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5535 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5536 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5537 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5538 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5539 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5540 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5541 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5542 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5543 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5544 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5545 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5546 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5547 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5548 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5549 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5550 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5551 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5552 " efer=%016VR{efer}\n"
5553 " pat=%016VR{pat}\n"
5554 " sf_mask=%016VR{sf_mask}\n"
5555 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5556 " lstar=%016VR{lstar}\n"
5557 " star=%016VR{star} cstar=%016VR{cstar}\n"
5558 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5559 );
5560
5561 char szInstr[256];
5562 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5563 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5564 szInstr, sizeof(szInstr), NULL);
5565 Log3(("%s%s\n", szRegs, szInstr));
5566 }
5567#endif /* LOG_ENABLED */
5568
5569 /*
5570 * Call the mode specific worker function.
5571 */
5572 VBOXSTRICTRC rcStrict;
5573 if (!(pCtx->cr0 & X86_CR0_PE))
5574 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5575 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5576 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5577 else
5578 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5579
5580 /* Flush the prefetch buffer. */
5581#ifdef IEM_WITH_CODE_TLB
5582 pVCpu->iem.s.pbInstrBuf = NULL;
5583#else
5584 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5585#endif
5586
5587 /*
5588 * Unwind.
5589 */
5590 pVCpu->iem.s.cXcptRecursions--;
5591 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5592 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5593 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5594 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5595 return rcStrict;
5596}
5597
5598#ifdef IEM_WITH_SETJMP
5599/**
5600 * See iemRaiseXcptOrInt. Will not return.
5601 */
5602IEM_STATIC DECL_NO_RETURN(void)
5603iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5604 uint8_t cbInstr,
5605 uint8_t u8Vector,
5606 uint32_t fFlags,
5607 uint16_t uErr,
5608 uint64_t uCr2)
5609{
5610 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5611 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5612}
5613#endif
5614
5615
5616/** \#DE - 00. */
5617DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5618{
5619 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5620}
5621
5622
5623/** \#DB - 01.
 5624 * @note This automatically clears DR7.GD. */
5625DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5626{
5627 /** @todo set/clear RF. */
5628 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5629 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5630}
5631
5632
5633/** \#BR - 05. */
5634DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5635{
5636 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5637}
5638
5639
5640/** \#UD - 06. */
5641DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5642{
5643 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5644}
5645
5646
5647/** \#NM - 07. */
5648DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5649{
5650 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5651}
5652
5653
5654/** \#TS(err) - 0a. */
5655DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5656{
5657 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5658}
5659
5660
5661/** \#TS(tr) - 0a. */
5662DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5663{
5664 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5665 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5666}
5667
5668
5669/** \#TS(0) - 0a. */
5670DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5671{
5672 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5673 0, 0);
5674}
5675
5676
5677/** \#TS(err) - 0a. */
5678DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5679{
5680 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5681 uSel & X86_SEL_MASK_OFF_RPL, 0);
5682}
5683
5684
5685/** \#NP(err) - 0b. */
5686DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5687{
5688 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5689}
5690
5691
5692/** \#NP(sel) - 0b. */
5693DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5694{
5695 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5696 uSel & ~X86_SEL_RPL, 0);
5697}
5698
5699
5700/** \#SS(seg) - 0c. */
5701DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5702{
5703 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5704 uSel & ~X86_SEL_RPL, 0);
5705}
5706
5707
5708/** \#SS(err) - 0c. */
5709DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5710{
5711 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5712}
5713
5714
5715/** \#GP(n) - 0d. */
5716DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5717{
5718 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5719}
5720
5721
5722/** \#GP(0) - 0d. */
5723DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5724{
5725 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5726}
5727
5728#ifdef IEM_WITH_SETJMP
5729/** \#GP(0) - 0d. */
5730DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5731{
5732 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5733}
5734#endif
5735
5736
5737/** \#GP(sel) - 0d. */
5738DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5739{
5740 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5741 Sel & ~X86_SEL_RPL, 0);
5742}
5743
5744
5745/** \#GP(0) - 0d. */
5746DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5747{
5748 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5749}
5750
5751
5752/** \#GP(sel) - 0d. */
5753DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5754{
5755 NOREF(iSegReg); NOREF(fAccess);
5756 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5757 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5758}
5759
5760#ifdef IEM_WITH_SETJMP
5761/** \#GP(sel) - 0d, longjmp. */
5762DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5763{
5764 NOREF(iSegReg); NOREF(fAccess);
5765 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5766 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5767}
5768#endif
5769
5770/** \#GP(sel) - 0d. */
5771DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5772{
5773 NOREF(Sel);
5774 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5775}
5776
5777#ifdef IEM_WITH_SETJMP
5778/** \#GP(sel) - 0d, longjmp. */
5779DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5780{
5781 NOREF(Sel);
5782 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5783}
5784#endif
5785
5786
5787/** \#GP(sel) - 0d. */
5788DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5789{
5790 NOREF(iSegReg); NOREF(fAccess);
5791 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5792}
5793
5794#ifdef IEM_WITH_SETJMP
5795/** \#GP(sel) - 0d, longjmp. */
5796DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5797 uint32_t fAccess)
5798{
5799 NOREF(iSegReg); NOREF(fAccess);
5800 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5801}
5802#endif
5803
5804
5805/** \#PF(n) - 0e. */
5806DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5807{
5808 uint16_t uErr;
5809 switch (rc)
5810 {
5811 case VERR_PAGE_NOT_PRESENT:
5812 case VERR_PAGE_TABLE_NOT_PRESENT:
5813 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5814 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5815 uErr = 0;
5816 break;
5817
5818 default:
5819 AssertMsgFailed(("%Rrc\n", rc));
5820 RT_FALL_THRU();
5821 case VERR_ACCESS_DENIED:
5822 uErr = X86_TRAP_PF_P;
5823 break;
5824
5825 /** @todo reserved */
5826 }
5827
5828 if (pVCpu->iem.s.uCpl == 3)
5829 uErr |= X86_TRAP_PF_US;
5830
5831 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5832 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5833 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5834 uErr |= X86_TRAP_PF_ID;
5835
5836#if 0 /* This is so much non-sense, really. Why was it done like that? */
5837 /* Note! RW access callers reporting a WRITE protection fault, will clear
5838 the READ flag before calling. So, read-modify-write accesses (RW)
5839 can safely be reported as READ faults. */
5840 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5841 uErr |= X86_TRAP_PF_RW;
5842#else
5843 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5844 {
5845 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5846 uErr |= X86_TRAP_PF_RW;
5847 }
5848#endif
5849
5850 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5851 uErr, GCPtrWhere);
5852}
5853
5854#ifdef IEM_WITH_SETJMP
5855/** \#PF(n) - 0e, longjmp. */
5856IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5857{
5858 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5859}
5860#endif
5861
5862
5863/** \#MF(0) - 10. */
5864DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5865{
5866 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5867}
5868
5869
5870/** \#AC(0) - 11. */
5871DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5872{
5873 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5874}
5875
5876
5877/**
5878 * Macro for calling iemCImplRaiseDivideError().
5879 *
5880 * This enables us to add/remove arguments and force different levels of
5881 * inlining as we wish.
5882 *
5883 * @return Strict VBox status code.
5884 */
5885#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5886IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5887{
5888 NOREF(cbInstr);
5889 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5890}
5891
5892
5893/**
5894 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5895 *
5896 * This enables us to add/remove arguments and force different levels of
5897 * inlining as we wish.
5898 *
5899 * @return Strict VBox status code.
5900 */
5901#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5902IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5903{
5904 NOREF(cbInstr);
5905 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5906}
5907
5908
5909/**
5910 * Macro for calling iemCImplRaiseInvalidOpcode().
5911 *
5912 * This enables us to add/remove arguments and force different levels of
5913 * inlining as we wish.
5914 *
5915 * @return Strict VBox status code.
5916 */
5917#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5918IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5919{
5920 NOREF(cbInstr);
5921 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5922}
5923
5924
5925/** @} */
5926
5927
5928/*
5929 *
 5930 * Helper routines.
 5931 * Helper routines.
 5932 * Helper routines.
5933 *
5934 */
5935
5936/**
5937 * Recalculates the effective operand size.
5938 *
5939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5940 */
5941IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5942{
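    /* The 0x66 operand-size prefix toggles between 16-bit and 32-bit operands,
       while in 64-bit mode REX.W selects 64-bit operands and takes precedence
       over a 0x66 prefix (see the combined prefix switch below). */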
5943 switch (pVCpu->iem.s.enmCpuMode)
5944 {
5945 case IEMMODE_16BIT:
5946 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5947 break;
5948 case IEMMODE_32BIT:
5949 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5950 break;
5951 case IEMMODE_64BIT:
5952 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5953 {
5954 case 0:
5955 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5956 break;
5957 case IEM_OP_PRF_SIZE_OP:
5958 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5959 break;
5960 case IEM_OP_PRF_SIZE_REX_W:
5961 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5962 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5963 break;
5964 }
5965 break;
5966 default:
5967 AssertFailed();
5968 }
5969}
5970
5971
5972/**
5973 * Sets the default operand size to 64-bit and recalculates the effective
5974 * operand size.
5975 *
5976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5977 */
5978IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5979{
5980 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5981 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5982 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5983 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5984 else
5985 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5986}
5987
5988
5989/*
5990 *
5991 * Common opcode decoders.
5992 * Common opcode decoders.
5993 * Common opcode decoders.
5994 *
5995 */
5996//#include <iprt/mem.h>
5997
5998/**
5999 * Used to add extra details about a stub case.
6000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6001 */
6002IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6003{
6004#if defined(LOG_ENABLED) && defined(IN_RING3)
6005 PVM pVM = pVCpu->CTX_SUFF(pVM);
6006 char szRegs[4096];
6007 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6008 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6009 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6010 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6011 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6012 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6013 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6014 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6015 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6016 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6017 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6018 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6019 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6020 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6021 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6022 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6023 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6024 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6025 " efer=%016VR{efer}\n"
6026 " pat=%016VR{pat}\n"
6027 " sf_mask=%016VR{sf_mask}\n"
6028 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6029 " lstar=%016VR{lstar}\n"
6030 " star=%016VR{star} cstar=%016VR{cstar}\n"
6031 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6032 );
6033
6034 char szInstr[256];
6035 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6036 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6037 szInstr, sizeof(szInstr), NULL);
6038
6039 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6040#else
 6041 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
6042#endif
6043}
6044
6045/**
6046 * Complains about a stub.
6047 *
6048 * Providing two versions of this macro, one for daily use and one for use when
6049 * working on IEM.
6050 */
6051#if 0
6052# define IEMOP_BITCH_ABOUT_STUB() \
6053 do { \
6054 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6055 iemOpStubMsg2(pVCpu); \
6056 RTAssertPanic(); \
6057 } while (0)
6058#else
6059# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6060#endif
6061
6062/** Stubs an opcode. */
6063#define FNIEMOP_STUB(a_Name) \
6064 FNIEMOP_DEF(a_Name) \
6065 { \
6066 RT_NOREF_PV(pVCpu); \
6067 IEMOP_BITCH_ABOUT_STUB(); \
6068 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6069 } \
6070 typedef int ignore_semicolon
6071
6072/** Stubs an opcode. */
6073#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6074 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6075 { \
6076 RT_NOREF_PV(pVCpu); \
6077 RT_NOREF_PV(a_Name0); \
6078 IEMOP_BITCH_ABOUT_STUB(); \
6079 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6080 } \
6081 typedef int ignore_semicolon
6082
6083/** Stubs an opcode which currently should raise \#UD. */
6084#define FNIEMOP_UD_STUB(a_Name) \
6085 FNIEMOP_DEF(a_Name) \
6086 { \
6087 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6088 return IEMOP_RAISE_INVALID_OPCODE(); \
6089 } \
6090 typedef int ignore_semicolon
6091
6092/** Stubs an opcode which currently should raise \#UD. */
6093#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6094 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6095 { \
6096 RT_NOREF_PV(pVCpu); \
6097 RT_NOREF_PV(a_Name0); \
6098 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6099 return IEMOP_RAISE_INVALID_OPCODE(); \
6100 } \
6101 typedef int ignore_semicolon
6102
6103
6104
6105/** @name Register Access.
6106 * @{
6107 */
6108
6109/**
6110 * Gets a reference (pointer) to the specified hidden segment register.
6111 *
6112 * @returns Hidden register reference.
6113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6114 * @param iSegReg The segment register.
6115 */
6116IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6117{
6118 Assert(iSegReg < X86_SREG_COUNT);
6119 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6120 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
6121
6122#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6123 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6124 { /* likely */ }
6125 else
6126 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6127#else
6128 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6129#endif
6130 return pSReg;
6131}
6132
6133
6134/**
6135 * Ensures that the given hidden segment register is up to date.
6136 *
6137 * @returns Hidden register reference.
6138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6139 * @param pSReg The segment register.
6140 */
6141IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6142{
6143#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6144 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6145 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6146#else
6147 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6148 NOREF(pVCpu);
6149#endif
6150 return pSReg;
6151}
6152
6153
6154/**
6155 * Gets a reference (pointer) to the specified segment register (the selector
6156 * value).
6157 *
6158 * @returns Pointer to the selector variable.
6159 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6160 * @param iSegReg The segment register.
6161 */
6162DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6163{
6164 Assert(iSegReg < X86_SREG_COUNT);
6165 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6166 return &pCtx->aSRegs[iSegReg].Sel;
6167}
6168
6169
6170/**
6171 * Fetches the selector value of a segment register.
6172 *
6173 * @returns The selector value.
6174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6175 * @param iSegReg The segment register.
6176 */
6177DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6178{
6179 Assert(iSegReg < X86_SREG_COUNT);
6180 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
6181}
6182
6183
6184/**
6185 * Fetches the base address value of a segment register.
6186 *
6187 * @returns The segment base address value.
6188 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6189 * @param iSegReg The segment register.
6190 */
6191DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6192{
6193 Assert(iSegReg < X86_SREG_COUNT);
6194 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].u64Base;
6195}
6196
6197
6198/**
6199 * Gets a reference (pointer) to the specified general purpose register.
6200 *
6201 * @returns Register reference.
6202 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6203 * @param iReg The general purpose register.
6204 */
6205DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6206{
6207 Assert(iReg < 16);
6208 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6209 return &pCtx->aGRegs[iReg];
6210}
6211
6212
6213/**
6214 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6215 *
6216 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6217 *
6218 * @returns Register reference.
6219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6220 * @param iReg The register.
6221 */
6222DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6223{
6224 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6225 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6226 {
6227 Assert(iReg < 16);
6228 return &pCtx->aGRegs[iReg].u8;
6229 }
6230 /* high 8-bit register. */
6231 Assert(iReg < 8);
6232 return &pCtx->aGRegs[iReg & 3].bHi;
6233}
6234
6235
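/*
 * Illustrative sketch only: how the high-byte registers map.  Without a
 * REX prefix, encodings 4-7 select AH/CH/DH/BH, i.e. the high byte of
 * rAX/rCX/rDX/rBX; with any REX prefix present they select SPL/BPL/SIL/DIL
 * instead.  Assuming no REX prefix:
 */
#if 0
    uint8_t *pbAh = iemGRegRefU8(pVCpu, 4); /* -> &pCtx->aGRegs[0].bHi, i.e. AH */
    uint8_t *pbBh = iemGRegRefU8(pVCpu, 7); /* -> &pCtx->aGRegs[3].bHi, i.e. BH */
#endif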
6236/**
6237 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6238 *
6239 * @returns Register reference.
6240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6241 * @param iReg The register.
6242 */
6243DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6244{
6245 Assert(iReg < 16);
6246 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6247 return &pCtx->aGRegs[iReg].u16;
6248}
6249
6250
6251/**
6252 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6253 *
6254 * @returns Register reference.
6255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6256 * @param iReg The register.
6257 */
6258DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6259{
6260 Assert(iReg < 16);
6261 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6262 return &pCtx->aGRegs[iReg].u32;
6263}
6264
6265
6266/**
6267 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6268 *
6269 * @returns Register reference.
6270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6271 * @param iReg The register.
6272 */
6273DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6274{
6275 Assert(iReg < 16);
6276 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6277 return &pCtx->aGRegs[iReg].u64;
6278}
6279
6280
6281/**
6282 * Gets a reference (pointer) to the specified segment register's base address.
6283 *
6284 * @returns Segment register base address reference.
6285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6286 * @param iSegReg The segment selector.
6287 */
6288DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6289{
6290 Assert(iSegReg < X86_SREG_COUNT);
6291 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6292 return &pCtx->aSRegs[iSegReg].u64Base;
6293}
6294
6295
6296/**
6297 * Fetches the value of an 8-bit general purpose register.
6298 *
6299 * @returns The register value.
6300 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6301 * @param iReg The register.
6302 */
6303DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6304{
6305 return *iemGRegRefU8(pVCpu, iReg);
6306}
6307
6308
6309/**
6310 * Fetches the value of a 16-bit general purpose register.
6311 *
6312 * @returns The register value.
6313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6314 * @param iReg The register.
6315 */
6316DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6317{
6318 Assert(iReg < 16);
6319 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6320}
6321
6322
6323/**
6324 * Fetches the value of a 32-bit general purpose register.
6325 *
6326 * @returns The register value.
6327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6328 * @param iReg The register.
6329 */
6330DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6331{
6332 Assert(iReg < 16);
6333 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6334}
6335
6336
6337/**
6338 * Fetches the value of a 64-bit general purpose register.
6339 *
6340 * @returns The register value.
6341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6342 * @param iReg The register.
6343 */
6344DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6345{
6346 Assert(iReg < 16);
6347 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6348}
6349
6350
6351/**
6352 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6353 *
6354 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6355 * segment limit.
6356 *
6357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6358 * @param offNextInstr The offset of the next instruction.
6359 */
6360IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6361{
6362 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6363 switch (pVCpu->iem.s.enmEffOpSize)
6364 {
6365 case IEMMODE_16BIT:
6366 {
6367 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6368 if ( uNewIp > pCtx->cs.u32Limit
6369 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6370 return iemRaiseGeneralProtectionFault0(pVCpu);
6371 pCtx->rip = uNewIp;
6372 break;
6373 }
6374
6375 case IEMMODE_32BIT:
6376 {
6377 Assert(pCtx->rip <= UINT32_MAX);
6378 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6379
6380 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6381 if (uNewEip > pCtx->cs.u32Limit)
6382 return iemRaiseGeneralProtectionFault0(pVCpu);
6383 pCtx->rip = uNewEip;
6384 break;
6385 }
6386
6387 case IEMMODE_64BIT:
6388 {
6389 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6390
6391 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6392 if (!IEM_IS_CANONICAL(uNewRip))
6393 return iemRaiseGeneralProtectionFault0(pVCpu);
6394 pCtx->rip = uNewRip;
6395 break;
6396 }
6397
6398 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6399 }
6400
6401 pCtx->eflags.Bits.u1RF = 0;
6402
6403#ifndef IEM_WITH_CODE_TLB
6404 /* Flush the prefetch buffer. */
6405 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6406#endif
6407
6408 return VINF_SUCCESS;
6409}
6410
6411
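/*
 * Illustrative sketch only: the 16-bit case above relies on uint16_t
 * truncation for IP wrap-around.  For example, with IP=0xfffe, a 2 byte
 * instruction and offNextInstr=+4, the sum 0x10004 wraps to 0x0004, which
 * is then checked against CS.limit before being committed to RIP.
 */
#if 0
    uint16_t const uIp          = UINT16_C(0xfffe);
    uint8_t  const cbInstr      = 2;
    int8_t   const offNextInstr = 4;
    uint16_t const uNewIp       = uIp + offNextInstr + cbInstr; /* wraps to 0x0004 */
#endif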
6412/**
6413 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6414 *
6415 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6416 * segment limit.
6417 *
6418 * @returns Strict VBox status code.
6419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6420 * @param offNextInstr The offset of the next instruction.
6421 */
6422IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6423{
6424 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6425 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6426
6427 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6428 if ( uNewIp > pCtx->cs.u32Limit
6429 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6430 return iemRaiseGeneralProtectionFault0(pVCpu);
6431 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6432 pCtx->rip = uNewIp;
6433 pCtx->eflags.Bits.u1RF = 0;
6434
6435#ifndef IEM_WITH_CODE_TLB
6436 /* Flush the prefetch buffer. */
6437 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6438#endif
6439
6440 return VINF_SUCCESS;
6441}
6442
6443
6444/**
6445 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6446 *
6447 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6448 * segment limit.
6449 *
6450 * @returns Strict VBox status code.
6451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6452 * @param offNextInstr The offset of the next instruction.
6453 */
6454IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6455{
6456 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6457 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6458
6459 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6460 {
6461 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6462
6463 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6464 if (uNewEip > pCtx->cs.u32Limit)
6465 return iemRaiseGeneralProtectionFault0(pVCpu);
6466 pCtx->rip = uNewEip;
6467 }
6468 else
6469 {
6470 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6471
6472 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6473 if (!IEM_IS_CANONICAL(uNewRip))
6474 return iemRaiseGeneralProtectionFault0(pVCpu);
6475 pCtx->rip = uNewRip;
6476 }
6477 pCtx->eflags.Bits.u1RF = 0;
6478
6479#ifndef IEM_WITH_CODE_TLB
6480 /* Flush the prefetch buffer. */
6481 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6482#endif
6483
6484 return VINF_SUCCESS;
6485}
6486
6487
6488/**
6489 * Performs a near jump to the specified address.
6490 *
6491 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6492 * segment limit.
6493 *
6494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6495 * @param uNewRip The new RIP value.
6496 */
6497IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6498{
6499 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6500 switch (pVCpu->iem.s.enmEffOpSize)
6501 {
6502 case IEMMODE_16BIT:
6503 {
6504 Assert(uNewRip <= UINT16_MAX);
6505 if ( uNewRip > pCtx->cs.u32Limit
6506 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6507 return iemRaiseGeneralProtectionFault0(pVCpu);
6508 /** @todo Test 16-bit jump in 64-bit mode. */
6509 pCtx->rip = uNewRip;
6510 break;
6511 }
6512
6513 case IEMMODE_32BIT:
6514 {
6515 Assert(uNewRip <= UINT32_MAX);
6516 Assert(pCtx->rip <= UINT32_MAX);
6517 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6518
6519 if (uNewRip > pCtx->cs.u32Limit)
6520 return iemRaiseGeneralProtectionFault0(pVCpu);
6521 pCtx->rip = uNewRip;
6522 break;
6523 }
6524
6525 case IEMMODE_64BIT:
6526 {
6527 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6528
6529 if (!IEM_IS_CANONICAL(uNewRip))
6530 return iemRaiseGeneralProtectionFault0(pVCpu);
6531 pCtx->rip = uNewRip;
6532 break;
6533 }
6534
6535 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6536 }
6537
6538 pCtx->eflags.Bits.u1RF = 0;
6539
6540#ifndef IEM_WITH_CODE_TLB
6541 /* Flush the prefetch buffer. */
6542 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6543#endif
6544
6545 return VINF_SUCCESS;
6546}
6547
6548
6549/**
6550 * Gets the address of the top of the stack.
6551 *
6552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6553 * @param pCtx The CPU context from which SP/ESP/RSP should be
6554 * read.
6555 */
6556DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6557{
6558 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6559 return pCtx->rsp;
6560 if (pCtx->ss.Attr.n.u1DefBig)
6561 return pCtx->esp;
6562 return pCtx->sp;
6563}
6564
6565
6566/**
6567 * Updates the RIP/EIP/IP to point to the next instruction.
6568 *
6569 * This function leaves the EFLAGS.RF flag alone.
6570 *
6571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6572 * @param cbInstr The number of bytes to add.
6573 */
6574IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6575{
6576 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6577 switch (pVCpu->iem.s.enmCpuMode)
6578 {
6579 case IEMMODE_16BIT:
6580 Assert(pCtx->rip <= UINT16_MAX);
6581 pCtx->eip += cbInstr;
6582 pCtx->eip &= UINT32_C(0xffff);
6583 break;
6584
6585 case IEMMODE_32BIT:
6586 pCtx->eip += cbInstr;
6587 Assert(pCtx->rip <= UINT32_MAX);
6588 break;
6589
6590 case IEMMODE_64BIT:
6591 pCtx->rip += cbInstr;
6592 break;
6593 default: AssertFailed();
6594 }
6595}
6596
6597
6598#if 0
6599/**
6600 * Updates the RIP/EIP/IP to point to the next instruction.
6601 *
6602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6603 */
6604IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6605{
6606 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6607}
6608#endif
6609
6610
6611
6612/**
6613 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6614 *
6615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6616 * @param cbInstr The number of bytes to add.
6617 */
6618IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6619{
6620 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6621
6622 pCtx->eflags.Bits.u1RF = 0;
6623
6624 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6625#if ARCH_BITS >= 64
6626 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6627 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6628 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6629#else
6630 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6631 pCtx->rip += cbInstr;
6632 else
6633 pCtx->eip += cbInstr;
6634#endif
6635}
6636
6637
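/*
 * Illustrative sketch only: the table lookup above replaces a switch.
 * IEMMODE_16BIT/32BIT/64BIT are 0/1/2, so the mode indexes straight into
 * s_aRipMasks and the advanced RIP is masked to what the mode allows
 * (32-bit wrap outside long mode, the full 64-bit value in long mode).
 */
#if 0
    uint64_t const uRip    = UINT64_C(0xfffffffe);
    uint8_t  const cbInstr = 5;
    uint64_t const uNewRip = (uRip + cbInstr) & UINT64_C(0xffffffff); /* wraps to 0x3 in 32-bit mode */
#endif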
6638/**
6639 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6640 *
6641 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6642 */
6643IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6644{
6645 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6646}
6647
6648
6649/**
6650 * Adds to the stack pointer.
6651 *
6652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6653 * @param pCtx The CPU context in which SP/ESP/RSP should be
6654 * updated.
6655 * @param cbToAdd The number of bytes to add (8-bit!).
6656 */
6657DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6658{
6659 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6660 pCtx->rsp += cbToAdd;
6661 else if (pCtx->ss.Attr.n.u1DefBig)
6662 pCtx->esp += cbToAdd;
6663 else
6664 pCtx->sp += cbToAdd;
6665}
6666
6667
6668/**
6669 * Subtracts from the stack pointer.
6670 *
6671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6672 * @param pCtx The CPU context in which SP/ESP/RSP should be
6673 * updated.
6674 * @param cbToSub The number of bytes to subtract (8-bit!).
6675 */
6676DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6677{
6678 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6679 pCtx->rsp -= cbToSub;
6680 else if (pCtx->ss.Attr.n.u1DefBig)
6681 pCtx->esp -= cbToSub;
6682 else
6683 pCtx->sp -= cbToSub;
6684}
6685
6686
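/*
 * Illustrative sketch only: which part of RSP the helpers above modify.
 * 64-bit mode always uses the full RSP; otherwise SS.Attr.D ("big")
 * selects between ESP and SP, and the untouched upper bits of RSP are
 * left alone.  E.g. on a 16-bit stack:
 */
#if 0
    RTUINT64U uRsp;
    uRsp.u = UINT64_C(0x0000000000010000);
    uRsp.Words.w0 -= 2;  /* SP wraps from 0x0000 to 0xfffe ... */
    /* ... leaving uRsp.u == 0x000000000001fffe, the upper words untouched. */
#endif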
6687/**
6688 * Adds to the temporary stack pointer.
6689 *
6690 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6691 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6692 * @param cbToAdd The number of bytes to add (16-bit).
6693 * @param pCtx Where to get the current stack mode.
6694 */
6695DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6696{
6697 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6698 pTmpRsp->u += cbToAdd;
6699 else if (pCtx->ss.Attr.n.u1DefBig)
6700 pTmpRsp->DWords.dw0 += cbToAdd;
6701 else
6702 pTmpRsp->Words.w0 += cbToAdd;
6703}
6704
6705
6706/**
6707 * Subtracts from the temporary stack pointer.
6708 *
6709 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6710 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6711 * @param cbToSub The number of bytes to subtract.
6712 * @param pCtx Where to get the current stack mode.
6713 * @remarks The @a cbToSub argument *MUST* be 16-bit; iemCImpl_enter is
6714 * expecting that.
6715 */
6716DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6717{
6718 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6719 pTmpRsp->u -= cbToSub;
6720 else if (pCtx->ss.Attr.n.u1DefBig)
6721 pTmpRsp->DWords.dw0 -= cbToSub;
6722 else
6723 pTmpRsp->Words.w0 -= cbToSub;
6724}
6725
6726
6727/**
6728 * Calculates the effective stack address for a push of the specified size as
6729 * well as the new RSP value (upper bits may be masked).
6730 *
6731 * @returns Effective stack address for the push.
6732 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6733 * @param pCtx Where to get the current stack mode.
6734 * @param cbItem The size of the stack item to push.
6735 * @param puNewRsp Where to return the new RSP value.
6736 */
6737DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6738{
6739 RTUINT64U uTmpRsp;
6740 RTGCPTR GCPtrTop;
6741 uTmpRsp.u = pCtx->rsp;
6742
6743 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6744 GCPtrTop = uTmpRsp.u -= cbItem;
6745 else if (pCtx->ss.Attr.n.u1DefBig)
6746 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6747 else
6748 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6749 *puNewRsp = uTmpRsp.u;
6750 return GCPtrTop;
6751}
6752
6753
6754/**
6755 * Gets the current stack pointer and calculates the value after a pop of the
6756 * specified size.
6757 *
6758 * @returns Current stack pointer.
6759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6760 * @param pCtx Where to get the current stack mode.
6761 * @param cbItem The size of the stack item to pop.
6762 * @param puNewRsp Where to return the new RSP value.
6763 */
6764DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6765{
6766 RTUINT64U uTmpRsp;
6767 RTGCPTR GCPtrTop;
6768 uTmpRsp.u = pCtx->rsp;
6769
6770 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6771 {
6772 GCPtrTop = uTmpRsp.u;
6773 uTmpRsp.u += cbItem;
6774 }
6775 else if (pCtx->ss.Attr.n.u1DefBig)
6776 {
6777 GCPtrTop = uTmpRsp.DWords.dw0;
6778 uTmpRsp.DWords.dw0 += cbItem;
6779 }
6780 else
6781 {
6782 GCPtrTop = uTmpRsp.Words.w0;
6783 uTmpRsp.Words.w0 += cbItem;
6784 }
6785 *puNewRsp = uTmpRsp.u;
6786 return GCPtrTop;
6787}
6788
6789
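/*
 * Illustrative sketch only: a 4 byte push on a 32-bit stack with
 * ESP=0x00001000.  The helper below returns the effective address
 * 0x00000ffc and sets *puNewRsp to 0x00000ffc; the caller typically
 * commits RSP only after the memory write has succeeded.
 */
#if 0
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
#endif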
6790/**
6791 * Calculates the effective stack address for a push of the specified size as
6792 * well as the new temporary RSP value (upper bits may be masked).
6793 *
6794 * @returns Effective stack address for the push.
6795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6796 * @param pCtx Where to get the current stack mode.
6797 * @param pTmpRsp The temporary stack pointer. This is updated.
6798 * @param cbItem The size of the stack item to push.
6799 */
6800DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6801{
6802 RTGCPTR GCPtrTop;
6803
6804 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6805 GCPtrTop = pTmpRsp->u -= cbItem;
6806 else if (pCtx->ss.Attr.n.u1DefBig)
6807 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6808 else
6809 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6810 return GCPtrTop;
6811}
6812
6813
6814/**
6815 * Gets the effective stack address for a pop of the specified size and
6816 * calculates and updates the temporary RSP.
6817 *
6818 * @returns Current stack pointer.
6819 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6820 * @param pCtx Where to get the current stack mode.
6821 * @param pTmpRsp The temporary stack pointer. This is updated.
6822 * @param cbItem The size of the stack item to pop.
6823 */
6824DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6825{
6826 RTGCPTR GCPtrTop;
6827 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6828 {
6829 GCPtrTop = pTmpRsp->u;
6830 pTmpRsp->u += cbItem;
6831 }
6832 else if (pCtx->ss.Attr.n.u1DefBig)
6833 {
6834 GCPtrTop = pTmpRsp->DWords.dw0;
6835 pTmpRsp->DWords.dw0 += cbItem;
6836 }
6837 else
6838 {
6839 GCPtrTop = pTmpRsp->Words.w0;
6840 pTmpRsp->Words.w0 += cbItem;
6841 }
6842 return GCPtrTop;
6843}
6844
6845/** @} */
6846
6847
6848/** @name FPU access and helpers.
6849 *
6850 * @{
6851 */
6852
6853
6854/**
6855 * Hook for preparing to use the host FPU.
6856 *
6857 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6858 *
6859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6860 */
6861DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6862{
6863#ifdef IN_RING3
6864 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6865#else
6866 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6867#endif
6868}
6869
6870
6871/**
6872 * Hook for preparing to use the host FPU for SSE.
6873 *
6874 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6875 *
6876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6877 */
6878DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6879{
6880 iemFpuPrepareUsage(pVCpu);
6881}
6882
6883
6884/**
6885 * Hook for preparing to use the host FPU for AVX.
6886 *
6887 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6888 *
6889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6890 */
6891DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6892{
6893 iemFpuPrepareUsage(pVCpu);
6894}
6895
6896
6897/**
6898 * Hook for actualizing the guest FPU state before the interpreter reads it.
6899 *
6900 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6901 *
6902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6903 */
6904DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6905{
6906#ifdef IN_RING3
6907 NOREF(pVCpu);
6908#else
6909 CPUMRZFpuStateActualizeForRead(pVCpu);
6910#endif
6911}
6912
6913
6914/**
6915 * Hook for actualizing the guest FPU state before the interpreter changes it.
6916 *
6917 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6918 *
6919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6920 */
6921DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6922{
6923#ifdef IN_RING3
6924 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6925#else
6926 CPUMRZFpuStateActualizeForChange(pVCpu);
6927#endif
6928}
6929
6930
6931/**
6932 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6933 * only.
6934 *
6935 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6936 *
6937 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6938 */
6939DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6940{
6941#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6942 NOREF(pVCpu);
6943#else
6944 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6945#endif
6946}
6947
6948
6949/**
6950 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6951 * read+write.
6952 *
6953 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6954 *
6955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6956 */
6957DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6958{
6959#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6960 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6961#else
6962 CPUMRZFpuStateActualizeForChange(pVCpu);
6963#endif
6964}
6965
6966
6967/**
6968 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6969 * only.
6970 *
6971 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6972 *
6973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6974 */
6975DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6976{
6977#ifdef IN_RING3
6978 NOREF(pVCpu);
6979#else
6980 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6981#endif
6982}
6983
6984
6985/**
6986 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
6987 * read+write.
6988 *
6989 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6990 *
6991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6992 */
6993DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
6994{
6995#ifdef IN_RING3
6996 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6997#else
6998 CPUMRZFpuStateActualizeForChange(pVCpu);
6999#endif
7000}
7001
7002
7003/**
7004 * Stores a QNaN value (the x87 real indefinite) into an FPU register.
7005 *
7006 * @param pReg Pointer to the register.
7007 */
7008DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7009{
7010 pReg->au32[0] = UINT32_C(0x00000000);
7011 pReg->au32[1] = UINT32_C(0xc0000000);
7012 pReg->au16[4] = UINT16_C(0xffff);
7013}
7014
7015
7016/**
7017 * Updates the FOP, FPU.CS and FPUIP registers.
7018 *
7019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7020 * @param pCtx The CPU context.
7021 * @param pFpuCtx The FPU context.
7022 */
7023DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
7024{
7025 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7026 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7027 /** @todo x87.CS and FPUIP need to be kept separately. */
7028 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7029 {
7030 /** @todo Testcase: we make assumptions here about how FPUIP and FPUDP are
7031 * handled in real mode, based on the fnsave and fnstenv images. */
7032 pFpuCtx->CS = 0;
7033 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
7034 }
7035 else
7036 {
7037 pFpuCtx->CS = pCtx->cs.Sel;
7038 pFpuCtx->FPUIP = pCtx->rip;
7039 }
7040}
7041
7042
7043/**
7044 * Updates the x87.DS and FPUDP registers.
7045 *
7046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7047 * @param pCtx The CPU context.
7048 * @param pFpuCtx The FPU context.
7049 * @param iEffSeg The effective segment register.
7050 * @param GCPtrEff The effective address relative to @a iEffSeg.
7051 */
7052DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7053{
7054 RTSEL sel;
7055 switch (iEffSeg)
7056 {
7057 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
7058 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
7059 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
7060 case X86_SREG_ES: sel = pCtx->es.Sel; break;
7061 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
7062 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
7063 default:
7064 AssertMsgFailed(("%d\n", iEffSeg));
7065 sel = pCtx->ds.Sel;
7066 }
7067 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7068 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7069 {
7070 pFpuCtx->DS = 0;
7071 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7072 }
7073 else
7074 {
7075 pFpuCtx->DS = sel;
7076 pFpuCtx->FPUDP = GCPtrEff;
7077 }
7078}
7079
7080
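/*
 * Illustrative worked example: in the real/V86 branch above the data
 * pointer is stored as a linear-style address.  With DS=0x1234 and an
 * effective address of 0x0010, FPUDP = 0x0010 + (0x1234 << 4) = 0x12350
 * and the DS field is left at zero.  In protected mode the selector and
 * offset are stored as-is instead.
 */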
7081/**
7082 * Rotates the stack registers in the push direction.
7083 *
7084 * @param pFpuCtx The FPU context.
7085 * @remarks This is a complete waste of time, but fxsave stores the registers in
7086 * stack order.
7087 */
7088DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7089{
7090 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7091 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7092 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7093 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7094 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7095 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7096 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7097 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7098 pFpuCtx->aRegs[0].r80 = r80Tmp;
7099}
7100
7101
7102/**
7103 * Rotates the stack registers in the pop direction.
7104 *
7105 * @param pFpuCtx The FPU context.
7106 * @remarks This is a complete waste of time, but fxsave stores the registers in
7107 * stack order.
7108 */
7109DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7110{
7111 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7112 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7113 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7114 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7115 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7116 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7117 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7118 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7119 pFpuCtx->aRegs[7].r80 = r80Tmp;
7120}
7121
7122
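/*
 * Illustrative note: fxsave/fnsave store the x87 registers in *stack*
 * order and IEM mirrors that layout, so aRegs[0] is always ST(0) no
 * matter what FSW.TOP says.  That is why pushing and popping must rotate
 * the array rather than just adjust TOP.  A loop form of the push
 * rotation above would be (sketch only):
 */
#if 0
    RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
    for (unsigned i = 7; i > 0; i--)
        pFpuCtx->aRegs[i].r80 = pFpuCtx->aRegs[i - 1].r80;
    pFpuCtx->aRegs[0].r80 = r80Tmp;
#endif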
7123/**
7124 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7125 * exception prevents it.
7126 *
7127 * @param pResult The FPU operation result to push.
7128 * @param pFpuCtx The FPU context.
7129 */
7130IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7131{
7132 /* Update FSW and bail if there are pending exceptions afterwards. */
7133 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7134 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7135 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7136 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7137 {
7138 pFpuCtx->FSW = fFsw;
7139 return;
7140 }
7141
7142 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7143 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7144 {
7145 /* All is fine, push the actual value. */
7146 pFpuCtx->FTW |= RT_BIT(iNewTop);
7147 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7148 }
7149 else if (pFpuCtx->FCW & X86_FCW_IM)
7150 {
7151 /* Masked stack overflow, push QNaN. */
7152 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7153 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7154 }
7155 else
7156 {
7157 /* Raise stack overflow, don't push anything. */
7158 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7159 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7160 return;
7161 }
7162
7163 fFsw &= ~X86_FSW_TOP_MASK;
7164 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7165 pFpuCtx->FSW = fFsw;
7166
7167 iemFpuRotateStackPush(pFpuCtx);
7168}
7169
7170
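/*
 * Illustrative sketch only: the TOP arithmetic used above.  TOP is a
 * 3-bit field, so adding 7 modulo 8 is the same as subtracting 1; a push
 * therefore decrements TOP, wrapping from 0 to 7.
 */
#if 0
    uint16_t const iTop    = 0;
    uint16_t const iNewTop = (iTop + 7) & X86_FSW_TOP_SMASK; /* == 7 */
#endif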
7171/**
7172 * Stores a result in a FPU register and updates the FSW and FTW.
7173 *
7174 * @param pFpuCtx The FPU context.
7175 * @param pResult The result to store.
7176 * @param iStReg Which FPU register to store it in.
7177 */
7178IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7179{
7180 Assert(iStReg < 8);
7181 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7182 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7183 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7184 pFpuCtx->FTW |= RT_BIT(iReg);
7185 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7186}
7187
7188
7189/**
7190 * Only updates the FPU status word (FSW) with the result of the current
7191 * instruction.
7192 *
7193 * @param pFpuCtx The FPU context.
7194 * @param u16FSW The FSW output of the current instruction.
7195 */
7196IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7197{
7198 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7199 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7200}
7201
7202
7203/**
7204 * Pops one item off the FPU stack if no pending exception prevents it.
7205 *
7206 * @param pFpuCtx The FPU context.
7207 */
7208IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7209{
7210 /* Check pending exceptions. */
7211 uint16_t uFSW = pFpuCtx->FSW;
7212 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7213 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7214 return;
7215
7216 /* TOP++ - a pop increments the top-of-stack pointer (adding 9 is +1 modulo 8). */
7217 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7218 uFSW &= ~X86_FSW_TOP_MASK;
7219 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7220 pFpuCtx->FSW = uFSW;
7221
7222 /* Mark the previous ST0 as empty. */
7223 iOldTop >>= X86_FSW_TOP_SHIFT;
7224 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7225
7226 /* Rotate the registers. */
7227 iemFpuRotateStackPop(pFpuCtx);
7228}
7229
7230
7231/**
7232 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7233 *
7234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7235 * @param pResult The FPU operation result to push.
7236 */
7237IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7238{
7239 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7240 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7241 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7242 iemFpuMaybePushResult(pResult, pFpuCtx);
7243}
7244
7245
7246/**
7247 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7248 * and sets FPUDP and FPUDS.
7249 *
7250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7251 * @param pResult The FPU operation result to push.
7252 * @param iEffSeg The effective segment register.
7253 * @param GCPtrEff The effective address relative to @a iEffSeg.
7254 */
7255IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7256{
7257 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7258 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7259 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7260 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7261 iemFpuMaybePushResult(pResult, pFpuCtx);
7262}
7263
7264
7265/**
7266 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7267 * unless a pending exception prevents it.
7268 *
7269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7270 * @param pResult The FPU operation result to store and push.
7271 */
7272IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7273{
7274 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7275 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7276 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7277
7278 /* Update FSW and bail if there are pending exceptions afterwards. */
7279 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7280 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7281 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7282 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7283 {
7284 pFpuCtx->FSW = fFsw;
7285 return;
7286 }
7287
7288 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7289 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7290 {
7291 /* All is fine, push the actual value. */
7292 pFpuCtx->FTW |= RT_BIT(iNewTop);
7293 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7294 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7295 }
7296 else if (pFpuCtx->FCW & X86_FCW_IM)
7297 {
7298 /* Masked stack overflow, push QNaN. */
7299 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7300 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7301 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7302 }
7303 else
7304 {
7305 /* Raise stack overflow, don't push anything. */
7306 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7307 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7308 return;
7309 }
7310
7311 fFsw &= ~X86_FSW_TOP_MASK;
7312 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7313 pFpuCtx->FSW = fFsw;
7314
7315 iemFpuRotateStackPush(pFpuCtx);
7316}
7317
7318
7319/**
7320 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7321 * FOP.
7322 *
7323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7324 * @param pResult The result to store.
7325 * @param iStReg Which FPU register to store it in.
7326 */
7327IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7328{
7329 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7330 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7331 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7332 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7333}
7334
7335
7336/**
7337 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7338 * FOP, and then pops the stack.
7339 *
7340 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7341 * @param pResult The result to store.
7342 * @param iStReg Which FPU register to store it in.
7343 */
7344IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7345{
7346 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7347 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7348 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7349 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7350 iemFpuMaybePopOne(pFpuCtx);
7351}
7352
7353
7354/**
7355 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7356 * FPUDP, and FPUDS.
7357 *
7358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7359 * @param pResult The result to store.
7360 * @param iStReg Which FPU register to store it in.
7361 * @param iEffSeg The effective memory operand selector register.
7362 * @param GCPtrEff The effective memory operand offset.
7363 */
7364IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7365 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7366{
7367 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7368 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7369 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7370 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7371 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7372}
7373
7374
7375/**
7376 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7377 * FPUDP, and FPUDS, and then pops the stack.
7378 *
7379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7380 * @param pResult The result to store.
7381 * @param iStReg Which FPU register to store it in.
7382 * @param iEffSeg The effective memory operand selector register.
7383 * @param GCPtrEff The effective memory operand offset.
7384 */
7385IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7386 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7387{
7388 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7389 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7390 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7391 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7392 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7393 iemFpuMaybePopOne(pFpuCtx);
7394}
7395
7396
7397/**
7398 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7399 *
7400 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7401 */
7402IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7403{
7404 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7405 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7406 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7407}
7408
7409
7410/**
7411 * Marks the specified stack register as free (for FFREE).
7412 *
7413 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7414 * @param iStReg The register to free.
7415 */
7416IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7417{
7418 Assert(iStReg < 8);
7419 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7420 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7421 pFpuCtx->FTW &= ~RT_BIT(iReg);
7422}
7423
7424
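/*
 * Illustrative sketch only: mapping a stack-relative ST(i) to the
 * physical register number used by the FTW bitmap.  With FSW.TOP=6,
 * ST(1) is physical register (6 + 1) & 7 = 7, so FFREE ST(1) clears bit 7
 * of FTW.  The aRegs[] array, on the other hand, is indexed by the
 * stack-relative number because it is kept in stack order.
 */
#if 0
    uint16_t const iTop = 6;
    uint16_t const iReg = (iTop + 1 /*ST(1)*/) & X86_FSW_TOP_SMASK; /* == 7 */
#endif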
7425/**
7426 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7427 *
7428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7429 */
7430IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7431{
7432 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7433 uint16_t uFsw = pFpuCtx->FSW;
7434 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7435 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7436 uFsw &= ~X86_FSW_TOP_MASK;
7437 uFsw |= uTop;
7438 pFpuCtx->FSW = uFsw;
7439}
7440
7441
7442/**
7443 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7444 *
7445 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7446 */
7447IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7448{
7449 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7450 uint16_t uFsw = pFpuCtx->FSW;
7451 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7452 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7453 uFsw &= ~X86_FSW_TOP_MASK;
7454 uFsw |= uTop;
7455 pFpuCtx->FSW = uFsw;
7456}
7457
7458
7459/**
7460 * Updates the FSW, FOP, FPUIP, and FPUCS.
7461 *
7462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7463 * @param u16FSW The FSW from the current instruction.
7464 */
7465IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7466{
7467 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7468 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7469 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7470 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7471}
7472
7473
7474/**
7475 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7476 *
7477 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7478 * @param u16FSW The FSW from the current instruction.
7479 */
7480IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7481{
7482 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7483 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7484 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7485 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7486 iemFpuMaybePopOne(pFpuCtx);
7487}
7488
7489
7490/**
7491 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7492 *
7493 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7494 * @param u16FSW The FSW from the current instruction.
7495 * @param iEffSeg The effective memory operand selector register.
7496 * @param GCPtrEff The effective memory operand offset.
7497 */
7498IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7499{
7500 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7501 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7502 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7503 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7504 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7505}
7506
7507
7508/**
7509 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7510 *
7511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7512 * @param u16FSW The FSW from the current instruction.
7513 */
7514IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7515{
7516 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7517 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7518 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7519 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7520 iemFpuMaybePopOne(pFpuCtx);
7521 iemFpuMaybePopOne(pFpuCtx);
7522}
7523
7524
7525/**
7526 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7527 *
7528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7529 * @param u16FSW The FSW from the current instruction.
7530 * @param iEffSeg The effective memory operand selector register.
7531 * @param GCPtrEff The effective memory operand offset.
7532 */
7533IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7534{
7535 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7536 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7537 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7538 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7539 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7540 iemFpuMaybePopOne(pFpuCtx);
7541}
7542
7543
7544/**
7545 * Worker routine for raising an FPU stack underflow exception.
7546 *
7547 * @param pFpuCtx The FPU context.
7548 * @param iStReg The stack register being accessed.
7549 */
7550IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7551{
7552 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7553 if (pFpuCtx->FCW & X86_FCW_IM)
7554 {
7555 /* Masked underflow. */
7556 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7557 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7558 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7559 if (iStReg != UINT8_MAX)
7560 {
7561 pFpuCtx->FTW |= RT_BIT(iReg);
7562 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7563 }
7564 }
7565 else
7566 {
7567 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7568 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7569 }
7570}
7571
7572
7573/**
7574 * Raises a FPU stack underflow exception.
7575 *
7576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7577 * @param iStReg The destination register that should be loaded
7578 * with QNaN if \#IS is masked. Specify
7579 * UINT8_MAX if none (like for fcom).
7580 */
7581DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7582{
7583 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7584 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7585 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7586 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7587}
7588
7589
7590DECL_NO_INLINE(IEM_STATIC, void)
7591iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7592{
7593 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7594 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7595 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7596 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7597 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7598}
7599
7600
7601DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7602{
7603 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7604 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7605 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7606 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7607 iemFpuMaybePopOne(pFpuCtx);
7608}
7609
7610
7611DECL_NO_INLINE(IEM_STATIC, void)
7612iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7613{
7614 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7615 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7616 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7617 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7618 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7619 iemFpuMaybePopOne(pFpuCtx);
7620}
7621
7622
7623DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7624{
7625 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7626 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7627 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7628 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7629 iemFpuMaybePopOne(pFpuCtx);
7630 iemFpuMaybePopOne(pFpuCtx);
7631}
7632
7633
7634DECL_NO_INLINE(IEM_STATIC, void)
7635iemFpuStackPushUnderflow(PVMCPU pVCpu)
7636{
7637 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7638 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7639 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7640
7641 if (pFpuCtx->FCW & X86_FCW_IM)
7642 {
7643 /* Masked underflow - push QNaN. */
7644 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7645 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7646 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7647 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7648 pFpuCtx->FTW |= RT_BIT(iNewTop);
7649 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7650 iemFpuRotateStackPush(pFpuCtx);
7651 }
7652 else
7653 {
7654 /* Exception pending - don't change TOP or the register stack. */
7655 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7656 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7657 }
7658}
7659
7660
7661DECL_NO_INLINE(IEM_STATIC, void)
7662iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7663{
7664 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7665 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7666 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7667
7668 if (pFpuCtx->FCW & X86_FCW_IM)
7669 {
7670 /* Masked underflow - push QNaN. */
7671 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7672 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7673 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7674 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7675 pFpuCtx->FTW |= RT_BIT(iNewTop);
7676 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7677 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7678 iemFpuRotateStackPush(pFpuCtx);
7679 }
7680 else
7681 {
7682 /* Exception pending - don't change TOP or the register stack. */
7683 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7684 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7685 }
7686}
7687
7688
7689/**
7690 * Worker routine for raising an FPU stack overflow exception on a push.
7691 *
7692 * @param pFpuCtx The FPU context.
7693 */
7694IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7695{
7696 if (pFpuCtx->FCW & X86_FCW_IM)
7697 {
7698 /* Masked overflow. */
7699 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7700 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7701 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7702 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7703 pFpuCtx->FTW |= RT_BIT(iNewTop);
7704 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7705 iemFpuRotateStackPush(pFpuCtx);
7706 }
7707 else
7708 {
7709 /* Exception pending - don't change TOP or the register stack. */
7710 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7711 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7712 }
7713}
7714
7715
7716/**
7717 * Raises a FPU stack overflow exception on a push.
7718 *
7719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7720 */
7721DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7722{
7723 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7724 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7725 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7726 iemFpuStackPushOverflowOnly(pFpuCtx);
7727}
7728
7729
7730/**
7731 * Raises a FPU stack overflow exception on a push with a memory operand.
7732 *
7733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7734 * @param iEffSeg The effective memory operand selector register.
7735 * @param GCPtrEff The effective memory operand offset.
7736 */
7737DECL_NO_INLINE(IEM_STATIC, void)
7738iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7739{
7740 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7741 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7742 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7743 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7744 iemFpuStackPushOverflowOnly(pFpuCtx);
7745}
7746
7747
7748IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7749{
7750 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7751 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7752 if (pFpuCtx->FTW & RT_BIT(iReg))
7753 return VINF_SUCCESS;
7754 return VERR_NOT_FOUND;
7755}
7756
7757
7758IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7759{
7760 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7761 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7762 if (pFpuCtx->FTW & RT_BIT(iReg))
7763 {
7764 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7765 return VINF_SUCCESS;
7766 }
7767 return VERR_NOT_FOUND;
7768}
7769
7770
7771IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7772 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7773{
7774 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7775 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7776 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7777 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7778 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7779 {
7780 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7781 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7782 return VINF_SUCCESS;
7783 }
7784 return VERR_NOT_FOUND;
7785}
7786
7787
7788IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7789{
7790 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7791 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7792 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7793 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7794 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7795 {
7796 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7797 return VINF_SUCCESS;
7798 }
7799 return VERR_NOT_FOUND;
7800}
7801
7802
7803/**
7804 * Updates the FPU exception status after FCW is changed.
7805 *
7806 * @param pFpuCtx The FPU context.
7807 */
7808IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7809{
7810 uint16_t u16Fsw = pFpuCtx->FSW;
7811 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7812 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7813 else
7814 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7815 pFpuCtx->FSW = u16Fsw;
7816}
7817
7818
7819/**
7820 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7821 *
7822 * @returns The full FTW.
7823 * @param pFpuCtx The FPU context.
7824 */
7825IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7826{
7827 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7828 uint16_t u16Ftw = 0;
7829 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7830 for (unsigned iSt = 0; iSt < 8; iSt++)
7831 {
7832 unsigned const iReg = (iSt + iTop) & 7;
7833 if (!(u8Ftw & RT_BIT(iReg)))
7834 u16Ftw |= 3 << (iReg * 2); /* empty */
7835 else
7836 {
7837 uint16_t uTag;
7838 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7839 if (pr80Reg->s.uExponent == 0x7fff)
7840 uTag = 2; /* Exponent is all 1's => Special. */
7841 else if (pr80Reg->s.uExponent == 0x0000)
7842 {
7843 if (pr80Reg->s.u64Mantissa == 0x0000)
7844 uTag = 1; /* All bits are zero => Zero. */
7845 else
7846 uTag = 2; /* Must be special. */
7847 }
7848 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7849 uTag = 0; /* Valid. */
7850 else
7851 uTag = 2; /* Must be special. */
7852
7853 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7854 }
7855 }
7856
7857 return u16Ftw;
7858}
7859
7860
7861/**
7862 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7863 *
7864 * @returns The compressed FTW.
7865 * @param u16FullFtw The full FTW to convert.
7866 */
7867IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7868{
7869 uint8_t u8Ftw = 0;
7870 for (unsigned i = 0; i < 8; i++)
7871 {
7872 if ((u16FullFtw & 3) != 3 /*empty*/)
7873 u8Ftw |= RT_BIT(i);
7874 u16FullFtw >>= 2;
7875 }
7876
7877 return u8Ftw;
7878}
7879
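/*
 * Illustrative worked example of the two FTW encodings handled above.
 * The compressed form (FXSAVE and pFpuCtx->FTW) keeps one bit per
 * physical register: 1 = in use, 0 = empty.  The full form (FNSTENV,
 * FNSAVE) keeps two bits: 00 = valid, 01 = zero, 10 = special,
 * 11 = empty.  So a full tag word of 0xffff (all empty) compresses to
 * 0x00, and 0xfffe (register 0 tagged special, 1 thru 7 empty)
 * compresses to 0x01.
 */
#if 0
    uint16_t const u16Compressed = iemFpuCompressFtw(UINT16_C(0xfffe)); /* == 0x01 */
#endif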
7880/** @} */
7881
7882
7883/** @name Memory access.
7884 *
7885 * @{
7886 */
7887
7888
7889/**
7890 * Updates the IEMCPU::cbWritten counter if applicable.
7891 *
7892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7893 * @param fAccess The access being accounted for.
7894 * @param cbMem The access size.
7895 */
7896DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7897{
7898 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7899 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7900 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7901}
7902
7903
7904/**
7905 * Checks if the given segment can be written to, raising the appropriate
7906 * exception if not.
7907 *
7908 * @returns VBox strict status code.
7909 *
7910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7911 * @param pHid Pointer to the hidden register.
7912 * @param iSegReg The register number.
7913 * @param pu64BaseAddr Where to return the base address to use for the
7914 * segment. (In 64-bit code it may differ from the
7915 * base in the hidden segment.)
7916 */
7917IEM_STATIC VBOXSTRICTRC
7918iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7919{
7920 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7921 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7922 else
7923 {
7924 if (!pHid->Attr.n.u1Present)
7925 {
7926 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7927 AssertRelease(uSel == 0);
7928 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7929 return iemRaiseGeneralProtectionFault0(pVCpu);
7930 }
7931
7932 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7933 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7934 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7935 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7936 *pu64BaseAddr = pHid->u64Base;
7937 }
7938 return VINF_SUCCESS;
7939}
7940
7941
7942/**
7943 * Checks if the given segment can be read from, raising the appropriate
7944 * exception if not.
7945 *
7946 * @returns VBox strict status code.
7947 *
7948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7949 * @param pHid Pointer to the hidden register.
7950 * @param iSegReg The register number.
7951 * @param pu64BaseAddr Where to return the base address to use for the
7952 * segment. (In 64-bit code it may differ from the
7953 * base in the hidden segment.)
7954 */
7955IEM_STATIC VBOXSTRICTRC
7956iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7957{
7958 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7959 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7960 else
7961 {
7962 if (!pHid->Attr.n.u1Present)
7963 {
7964 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7965 AssertRelease(uSel == 0);
7966 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7967 return iemRaiseGeneralProtectionFault0(pVCpu);
7968 }
7969
7970 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7971 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7972 *pu64BaseAddr = pHid->u64Base;
7973 }
7974 return VINF_SUCCESS;
7975}
7976
7977
7978/**
7979 * Applies the segment limit, base and attributes.
7980 *
7981 * This may raise a \#GP or \#SS.
7982 *
7983 * @returns VBox strict status code.
7984 *
7985 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7986 * @param fAccess The kind of access which is being performed.
7987 * @param iSegReg The index of the segment register to apply.
7988 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7989 * TSS, ++).
7990 * @param cbMem The access size.
7991 * @param pGCPtrMem Pointer to the guest memory address to apply
7992 * segmentation to. Input and output parameter.
7993 */
7994IEM_STATIC VBOXSTRICTRC
7995iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7996{
7997 if (iSegReg == UINT8_MAX)
7998 return VINF_SUCCESS;
7999
8000 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8001 switch (pVCpu->iem.s.enmCpuMode)
8002 {
8003 case IEMMODE_16BIT:
8004 case IEMMODE_32BIT:
8005 {
8006 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8007 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8008
8009 if ( pSel->Attr.n.u1Present
8010 && !pSel->Attr.n.u1Unusable)
8011 {
8012 Assert(pSel->Attr.n.u1DescType);
8013 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8014 {
8015 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8016 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8017 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8018
8019 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8020 {
8021 /** @todo CPL check. */
8022 }
8023
8024 /*
8025 * There are two kinds of data selectors, normal and expand down.
8026 */
8027 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8028 {
8029 if ( GCPtrFirst32 > pSel->u32Limit
8030 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8031 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8032 }
8033 else
8034 {
8035 /*
8036 * The upper boundary is defined by the B bit, not the G bit!
8037 */
8038 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8039 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8040 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8041 }
8042 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8043 }
8044 else
8045 {
8046
8047 /*
8048             * A code selector can usually be used to read through; writing is
8049             * only permitted in real and V8086 mode.
8050 */
8051 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8052 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8053 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8054 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8055 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8056
8057 if ( GCPtrFirst32 > pSel->u32Limit
8058 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8059 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8060
8061 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8062 {
8063 /** @todo CPL check. */
8064 }
8065
8066 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8067 }
8068 }
8069 else
8070 return iemRaiseGeneralProtectionFault0(pVCpu);
8071 return VINF_SUCCESS;
8072 }
8073
8074 case IEMMODE_64BIT:
8075 {
8076 RTGCPTR GCPtrMem = *pGCPtrMem;
8077 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8078 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8079
8080 Assert(cbMem >= 1);
8081 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8082 return VINF_SUCCESS;
8083 return iemRaiseGeneralProtectionFault0(pVCpu);
8084 }
8085
8086 default:
8087 AssertFailedReturn(VERR_IEM_IPE_7);
8088 }
8089}
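/*
 * Worked example for the expand-down branch in iemMemApplySegment above
 * (illustrative numbers): with u32Limit=0x0fff and the B/u1DefBig bit set,
 * valid offsets are 0x00001000 thru 0xffffffff.  GCPtrFirst32=0x0800 is
 * rejected because it is below u32Limit + 1, while GCPtrFirst32=0x2000
 * passes as long as GCPtrLast32 does not exceed UINT32_MAX.
 */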
8090
8091
8092/**
8093 * Translates a virtual address to a physical address and checks if we
8094 * can access the page as specified.
8095 *
8096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8097 * @param GCPtrMem The virtual address.
8098 * @param fAccess The intended access.
8099 * @param pGCPhysMem Where to return the physical address.
8100 */
8101IEM_STATIC VBOXSTRICTRC
8102iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8103{
8104 /** @todo Need a different PGM interface here. We're currently using
8105     *        generic / REM interfaces. This won't cut it for R0 & RC. */
8106 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8107 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8108 RTGCPHYS GCPhys;
8109 uint64_t fFlags;
8110 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8111 if (RT_FAILURE(rc))
8112 {
8113 /** @todo Check unassigned memory in unpaged mode. */
8114 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8115 *pGCPhysMem = NIL_RTGCPHYS;
8116 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8117 }
8118
8119 /* If the page is writable and does not have the no-exec bit set, all
8120 access is allowed. Otherwise we'll have to check more carefully... */
8121 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8122 {
8123 /* Write to read only memory? */
8124 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8125 && !(fFlags & X86_PTE_RW)
8126 && ( (pVCpu->iem.s.uCpl == 3
8127 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8128 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
8129 {
8130 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8131 *pGCPhysMem = NIL_RTGCPHYS;
8132 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8133 }
8134
8135 /* Kernel memory accessed by userland? */
8136 if ( !(fFlags & X86_PTE_US)
8137 && pVCpu->iem.s.uCpl == 3
8138 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8139 {
8140 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8141 *pGCPhysMem = NIL_RTGCPHYS;
8142 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8143 }
8144
8145 /* Executing non-executable memory? */
8146 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8147 && (fFlags & X86_PTE_PAE_NX)
8148 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
8149 {
8150 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8151 *pGCPhysMem = NIL_RTGCPHYS;
8152 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8153 VERR_ACCESS_DENIED);
8154 }
8155 }
8156
8157 /*
8158 * Set the dirty / access flags.
8159     * ASSUMES this is set when the address is translated rather than on commit...
8160 */
8161 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8162 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8163 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8164 {
8165 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8166 AssertRC(rc2);
8167 }
8168
8169 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8170 *pGCPhysMem = GCPhys;
8171 return VINF_SUCCESS;
8172}
8173
8174
8175
8176/**
8177 * Maps a physical page.
8178 *
8179 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8181 * @param GCPhysMem The physical address.
8182 * @param fAccess The intended access.
8183 * @param ppvMem Where to return the mapping address.
8184 * @param pLock The PGM lock.
8185 */
8186IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8187{
8188#ifdef IEM_VERIFICATION_MODE_FULL
8189 /* Force the alternative path so we can ignore writes. */
8190 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
8191 {
8192 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8193 {
8194 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
8195 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8196 if (RT_FAILURE(rc2))
8197 pVCpu->iem.s.fProblematicMemory = true;
8198 }
8199 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8200 }
8201#endif
8202#ifdef IEM_LOG_MEMORY_WRITES
8203 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8204 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8205#endif
8206#ifdef IEM_VERIFICATION_MODE_MINIMAL
8207 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8208#endif
8209
8210 /** @todo This API may require some improving later. A private deal with PGM
8211     *        regarding locking and unlocking needs to be struck. A couple of TLBs
8212 * living in PGM, but with publicly accessible inlined access methods
8213 * could perhaps be an even better solution. */
8214 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8215 GCPhysMem,
8216 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8217 pVCpu->iem.s.fBypassHandlers,
8218 ppvMem,
8219 pLock);
8220 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8221 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8222
8223#ifdef IEM_VERIFICATION_MODE_FULL
8224 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8225 pVCpu->iem.s.fProblematicMemory = true;
8226#endif
8227 return rc;
8228}
8229
8230
8231/**
8232 * Unmaps a page previously mapped by iemMemPageMap.
8233 *
8234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8235 * @param GCPhysMem The physical address.
8236 * @param fAccess The intended access.
8237 * @param pvMem What iemMemPageMap returned.
8238 * @param pLock The PGM lock.
8239 */
8240DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8241{
8242 NOREF(pVCpu);
8243 NOREF(GCPhysMem);
8244 NOREF(fAccess);
8245 NOREF(pvMem);
8246 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8247}
8248
8249
8250/**
8251 * Looks up a memory mapping entry.
8252 *
8253 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
8254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8255 * @param pvMem The memory address.
8256 * @param pvMem The kind of access.
8257 */
8258DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8259{
8260 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8261 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8262 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8263 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8264 return 0;
8265 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8266 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8267 return 1;
8268 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8269 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8270 return 2;
8271 return VERR_NOT_FOUND;
8272}
8273
8274
8275/**
8276 * Finds a free memmap entry when using iNextMapping doesn't work.
8277 *
8278 * @returns Memory mapping index, 1024 on failure.
8279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8280 */
8281IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8282{
8283 /*
8284 * The easy case.
8285 */
8286 if (pVCpu->iem.s.cActiveMappings == 0)
8287 {
8288 pVCpu->iem.s.iNextMapping = 1;
8289 return 0;
8290 }
8291
8292 /* There should be enough mappings for all instructions. */
8293 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8294
8295 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8296 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8297 return i;
8298
8299 AssertFailedReturn(1024);
8300}
8301
8302
8303/**
8304 * Commits a bounce buffer that needs writing back and unmaps it.
8305 *
8306 * @returns Strict VBox status code.
8307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8308 * @param iMemMap The index of the buffer to commit.
8309 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8310 * Always false in ring-3, obviously.
8311 */
8312IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8313{
8314 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8315 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8316#ifdef IN_RING3
8317 Assert(!fPostponeFail);
8318 RT_NOREF_PV(fPostponeFail);
8319#endif
8320
8321 /*
8322 * Do the writing.
8323 */
8324#ifndef IEM_VERIFICATION_MODE_MINIMAL
8325 PVM pVM = pVCpu->CTX_SUFF(pVM);
8326 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
8327 && !IEM_VERIFICATION_ENABLED(pVCpu))
8328 {
8329 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8330 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8331 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8332 if (!pVCpu->iem.s.fBypassHandlers)
8333 {
8334 /*
8335 * Carefully and efficiently dealing with access handler return
8336             * codes makes this a little bloated.
8337 */
8338 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8339 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8340 pbBuf,
8341 cbFirst,
8342 PGMACCESSORIGIN_IEM);
8343 if (rcStrict == VINF_SUCCESS)
8344 {
8345 if (cbSecond)
8346 {
8347 rcStrict = PGMPhysWrite(pVM,
8348 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8349 pbBuf + cbFirst,
8350 cbSecond,
8351 PGMACCESSORIGIN_IEM);
8352 if (rcStrict == VINF_SUCCESS)
8353 { /* nothing */ }
8354 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8355 {
8356 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8357 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8358 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8359 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8360 }
8361# ifndef IN_RING3
8362 else if (fPostponeFail)
8363 {
8364 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8365 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8366 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8367 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8368 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8369 return iemSetPassUpStatus(pVCpu, rcStrict);
8370 }
8371# endif
8372 else
8373 {
8374 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8375 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8376 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8377 return rcStrict;
8378 }
8379 }
8380 }
8381 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8382 {
8383 if (!cbSecond)
8384 {
8385 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8386 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8387 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8388 }
8389 else
8390 {
8391 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8392 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8393 pbBuf + cbFirst,
8394 cbSecond,
8395 PGMACCESSORIGIN_IEM);
8396 if (rcStrict2 == VINF_SUCCESS)
8397 {
8398 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8399 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8400 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8401 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8402 }
8403 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8404 {
8405 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8406 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8407 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8408 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8409 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8410 }
8411# ifndef IN_RING3
8412 else if (fPostponeFail)
8413 {
8414 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8415 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8416 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8417 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8418 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8419 return iemSetPassUpStatus(pVCpu, rcStrict);
8420 }
8421# endif
8422 else
8423 {
8424 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8425 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8426 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8427 return rcStrict2;
8428 }
8429 }
8430 }
8431# ifndef IN_RING3
8432 else if (fPostponeFail)
8433 {
8434 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8435 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8436 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8437 if (!cbSecond)
8438 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8439 else
8440 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8441 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8442 return iemSetPassUpStatus(pVCpu, rcStrict);
8443 }
8444# endif
8445 else
8446 {
8447 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8448 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8449 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8450 return rcStrict;
8451 }
8452 }
8453 else
8454 {
8455 /*
8456 * No access handlers, much simpler.
8457 */
8458 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8459 if (RT_SUCCESS(rc))
8460 {
8461 if (cbSecond)
8462 {
8463 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8464 if (RT_SUCCESS(rc))
8465 { /* likely */ }
8466 else
8467 {
8468 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8469 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8470 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8471 return rc;
8472 }
8473 }
8474 }
8475 else
8476 {
8477 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8478 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8479 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8480 return rc;
8481 }
8482 }
8483 }
8484#endif
8485
8486#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8487 /*
8488 * Record the write(s).
8489 */
8490 if (!pVCpu->iem.s.fNoRem)
8491 {
8492 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8493 if (pEvtRec)
8494 {
8495 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8496 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8497 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8498 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8499 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8500 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8501 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8502 }
8503 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8504 {
8505 pEvtRec = iemVerifyAllocRecord(pVCpu);
8506 if (pEvtRec)
8507 {
8508 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8509 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8510 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8511 memcpy(pEvtRec->u.RamWrite.ab,
8512 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8513 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8514 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8515 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8516 }
8517 }
8518 }
8519#endif
8520#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8521 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8522 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8523 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8524 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8525 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8526 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8527
8528 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8529 g_cbIemWrote = cbWrote;
8530 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8531#endif
8532
8533 /*
8534 * Free the mapping entry.
8535 */
8536 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8537 Assert(pVCpu->iem.s.cActiveMappings != 0);
8538 pVCpu->iem.s.cActiveMappings--;
8539 return VINF_SUCCESS;
8540}
8541
8542
8543/**
8544 * iemMemMap worker that deals with a request crossing pages.
8545 */
8546IEM_STATIC VBOXSTRICTRC
8547iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8548{
8549 /*
8550 * Do the address translations.
8551 */
8552 RTGCPHYS GCPhysFirst;
8553 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8554 if (rcStrict != VINF_SUCCESS)
8555 return rcStrict;
8556
8557 RTGCPHYS GCPhysSecond;
8558 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8559 fAccess, &GCPhysSecond);
8560 if (rcStrict != VINF_SUCCESS)
8561 return rcStrict;
8562 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8563
8564 PVM pVM = pVCpu->CTX_SUFF(pVM);
8565#ifdef IEM_VERIFICATION_MODE_FULL
8566 /*
8567 * Detect problematic memory when verifying so we can select
8568 * the right execution engine. (TLB: Redo this.)
8569 */
8570 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8571 {
8572 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8573 if (RT_SUCCESS(rc2))
8574 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8575 if (RT_FAILURE(rc2))
8576 pVCpu->iem.s.fProblematicMemory = true;
8577 }
8578#endif
8579
8580
8581 /*
8582 * Read in the current memory content if it's a read, execute or partial
8583 * write access.
8584 */
8585 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8586 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8587 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8588
8589 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8590 {
8591 if (!pVCpu->iem.s.fBypassHandlers)
8592 {
8593 /*
8594 * Must carefully deal with access handler status codes here,
8595             * which makes the code a bit bloated.
8596 */
8597 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8598 if (rcStrict == VINF_SUCCESS)
8599 {
8600 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8601 if (rcStrict == VINF_SUCCESS)
8602 { /*likely */ }
8603 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8604 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8605 else
8606 {
8607                     Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8608 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8609 return rcStrict;
8610 }
8611 }
8612 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8613 {
8614 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8615 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8616 {
8617 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8618 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8619 }
8620 else
8621 {
8622                     Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8623                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8624 return rcStrict2;
8625 }
8626 }
8627 else
8628 {
8629                 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8630 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8631 return rcStrict;
8632 }
8633 }
8634 else
8635 {
8636 /*
8637 * No informational status codes here, much more straight forward.
8638             * No informational status codes here, much more straightforward.
8639 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8640 if (RT_SUCCESS(rc))
8641 {
8642 Assert(rc == VINF_SUCCESS);
8643 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8644 if (RT_SUCCESS(rc))
8645 Assert(rc == VINF_SUCCESS);
8646 else
8647 {
8648                     Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8649 return rc;
8650 }
8651 }
8652 else
8653 {
8654                 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8655 return rc;
8656 }
8657 }
8658
8659#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8660 if ( !pVCpu->iem.s.fNoRem
8661 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8662 {
8663 /*
8664 * Record the reads.
8665 */
8666 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8667 if (pEvtRec)
8668 {
8669 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8670 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8671 pEvtRec->u.RamRead.cb = cbFirstPage;
8672 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8673 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8674 }
8675 pEvtRec = iemVerifyAllocRecord(pVCpu);
8676 if (pEvtRec)
8677 {
8678 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8679 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8680 pEvtRec->u.RamRead.cb = cbSecondPage;
8681 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8682 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8683 }
8684 }
8685#endif
8686 }
8687#ifdef VBOX_STRICT
8688 else
8689 memset(pbBuf, 0xcc, cbMem);
8690 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8691 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8692#endif
8693
8694 /*
8695 * Commit the bounce buffer entry.
8696 */
8697 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8698 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8699 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8700 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8701 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8702 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8703 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8704 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8705 pVCpu->iem.s.cActiveMappings++;
8706
8707 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8708 *ppvMem = pbBuf;
8709 return VINF_SUCCESS;
8710}
8711
8712
8713/**
8714 * iemMemMap worker that deals with iemMemPageMap failures.
8715 */
8716IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8717 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8718{
8719 /*
8720 * Filter out conditions we can handle and the ones which shouldn't happen.
8721 */
8722 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8723 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8724 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8725 {
8726 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8727 return rcMap;
8728 }
8729 pVCpu->iem.s.cPotentialExits++;
8730
8731 /*
8732 * Read in the current memory content if it's a read, execute or partial
8733 * write access.
8734 */
8735 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8736 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8737 {
8738 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8739 memset(pbBuf, 0xff, cbMem);
8740 else
8741 {
8742 int rc;
8743 if (!pVCpu->iem.s.fBypassHandlers)
8744 {
8745 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8746 if (rcStrict == VINF_SUCCESS)
8747 { /* nothing */ }
8748 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8749 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8750 else
8751 {
8752 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8753 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8754 return rcStrict;
8755 }
8756 }
8757 else
8758 {
8759 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8760 if (RT_SUCCESS(rc))
8761 { /* likely */ }
8762 else
8763 {
8764 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8765 GCPhysFirst, rc));
8766 return rc;
8767 }
8768 }
8769 }
8770
8771#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8772 if ( !pVCpu->iem.s.fNoRem
8773 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8774 {
8775 /*
8776 * Record the read.
8777 */
8778 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8779 if (pEvtRec)
8780 {
8781 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8782 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8783 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8784 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8785 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8786 }
8787 }
8788#endif
8789 }
8790#ifdef VBOX_STRICT
8791 else
8792 memset(pbBuf, 0xcc, cbMem);
8793#endif
8794#ifdef VBOX_STRICT
8795 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8796 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8797#endif
8798
8799 /*
8800 * Commit the bounce buffer entry.
8801 */
8802 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8803 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8804 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8805 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8806 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8807 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8808 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8809 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8810 pVCpu->iem.s.cActiveMappings++;
8811
8812 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8813 *ppvMem = pbBuf;
8814 return VINF_SUCCESS;
8815}
8816
8817
8818
8819/**
8820 * Maps the specified guest memory for the given kind of access.
8821 *
8822 * This may be using bounce buffering of the memory if it's crossing a page
8823 * boundary or if there is an access handler installed for any of it. Because
8824 * of lock prefix guarantees, we're in for some extra clutter when this
8825 * happens.
8826 *
8827 * This may raise a \#GP, \#SS, \#PF or \#AC.
8828 *
8829 * @returns VBox strict status code.
8830 *
8831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8832 * @param ppvMem Where to return the pointer to the mapped
8833 * memory.
8834 * @param cbMem The number of bytes to map. This is usually 1,
8835 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8836 * string operations it can be up to a page.
8837 * @param iSegReg The index of the segment register to use for
8838 * this access. The base and limits are checked.
8839 * Use UINT8_MAX to indicate that no segmentation
8840 * is required (for IDT, GDT and LDT accesses).
8841 * @param GCPtrMem The address of the guest memory.
8842 * @param fAccess How the memory is being accessed. The
8843 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8844 * how to map the memory, while the
8845 * IEM_ACCESS_WHAT_XXX bit is used when raising
8846 * exceptions.
8847 */
8848IEM_STATIC VBOXSTRICTRC
8849iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8850{
8851 /*
8852 * Check the input and figure out which mapping entry to use.
8853 */
8854 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8855 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8856 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8857
8858 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8859 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8860 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8861 {
8862 iMemMap = iemMemMapFindFree(pVCpu);
8863 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8864 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8865 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8866 pVCpu->iem.s.aMemMappings[2].fAccess),
8867 VERR_IEM_IPE_9);
8868 }
8869
8870 /*
8871 * Map the memory, checking that we can actually access it. If something
8872 * slightly complicated happens, fall back on bounce buffering.
8873 */
8874 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8875 if (rcStrict != VINF_SUCCESS)
8876 return rcStrict;
8877
8878 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8879 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8880
8881 RTGCPHYS GCPhysFirst;
8882 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8883 if (rcStrict != VINF_SUCCESS)
8884 return rcStrict;
8885
8886 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8887 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8888 if (fAccess & IEM_ACCESS_TYPE_READ)
8889 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8890
8891 void *pvMem;
8892 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8893 if (rcStrict != VINF_SUCCESS)
8894 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8895
8896 /*
8897 * Fill in the mapping table entry.
8898 */
8899 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8900 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8901 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8902 pVCpu->iem.s.cActiveMappings++;
8903
8904 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8905 *ppvMem = pvMem;
8906 return VINF_SUCCESS;
8907}
8908
8909
8910/**
8911 * Commits the guest memory if bounce buffered and unmaps it.
8912 *
8913 * @returns Strict VBox status code.
8914 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8915 * @param pvMem The mapping.
8916 * @param fAccess The kind of access.
8917 */
8918IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8919{
8920 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8921 AssertReturn(iMemMap >= 0, iMemMap);
8922
8923 /* If it's bounce buffered, we may need to write back the buffer. */
8924 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8925 {
8926 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8927 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8928 }
8929 /* Otherwise unlock it. */
8930 else
8931 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8932
8933 /* Free the entry. */
8934 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8935 Assert(pVCpu->iem.s.cActiveMappings != 0);
8936 pVCpu->iem.s.cActiveMappings--;
8937 return VINF_SUCCESS;
8938}
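/*
 * Illustrative sketch (hypothetical function name, compiled out): the
 * map/modify/commit pattern callers wrap around iemMemMap and
 * iemMemCommitAndUnmap, here for a 2-byte data write.  The data fetch
 * helpers further down use the same shape for reads.
 */
#if 0 /* example only */
IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16Example(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
{
    uint16_t *pu16Dst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rc == VINF_SUCCESS)
    {
        *pu16Dst = u16Value;                                          /* write thru the mapping (or bounce buffer) */
        rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W); /* commits the bounce buffer if one was used */
    }
    return rc;
}
#endif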
8939
8940#ifdef IEM_WITH_SETJMP
8941
8942/**
8943 * Maps the specified guest memory for the given kind of access, longjmp on
8944 * error.
8945 *
8946 * This may be using bounce buffering of the memory if it's crossing a page
8947 * boundary or if there is an access handler installed for any of it. Because
8948 * of lock prefix guarantees, we're in for some extra clutter when this
8949 * happens.
8950 *
8951 * This may raise a \#GP, \#SS, \#PF or \#AC.
8952 *
8953 * @returns Pointer to the mapped memory.
8954 *
8955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8956 * @param cbMem The number of bytes to map. This is usually 1,
8957 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8958 * string operations it can be up to a page.
8959 * @param iSegReg The index of the segment register to use for
8960 * this access. The base and limits are checked.
8961 * Use UINT8_MAX to indicate that no segmentation
8962 * is required (for IDT, GDT and LDT accesses).
8963 * @param GCPtrMem The address of the guest memory.
8964 * @param fAccess How the memory is being accessed. The
8965 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8966 * how to map the memory, while the
8967 * IEM_ACCESS_WHAT_XXX bit is used when raising
8968 * exceptions.
8969 */
8970IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8971{
8972 /*
8973 * Check the input and figure out which mapping entry to use.
8974 */
8975 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8976 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8977 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8978
8979 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8980 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8981 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8982 {
8983 iMemMap = iemMemMapFindFree(pVCpu);
8984 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8985 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8986 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8987 pVCpu->iem.s.aMemMappings[2].fAccess),
8988 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8989 }
8990
8991 /*
8992 * Map the memory, checking that we can actually access it. If something
8993 * slightly complicated happens, fall back on bounce buffering.
8994 */
8995 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8996 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8997 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8998
8999 /* Crossing a page boundary? */
9000 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
9001 { /* No (likely). */ }
9002 else
9003 {
9004 void *pvMem;
9005 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9006 if (rcStrict == VINF_SUCCESS)
9007 return pvMem;
9008 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9009 }
9010
9011 RTGCPHYS GCPhysFirst;
9012 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9013 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9014 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9015
9016 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9017 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9018 if (fAccess & IEM_ACCESS_TYPE_READ)
9019 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9020
9021 void *pvMem;
9022 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9023 if (rcStrict == VINF_SUCCESS)
9024 { /* likely */ }
9025 else
9026 {
9027 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9028 if (rcStrict == VINF_SUCCESS)
9029 return pvMem;
9030 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9031 }
9032
9033 /*
9034 * Fill in the mapping table entry.
9035 */
9036 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9037 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9038 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9039 pVCpu->iem.s.cActiveMappings++;
9040
9041 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9042 return pvMem;
9043}
9044
9045
9046/**
9047 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9048 *
9049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9050 * @param pvMem The mapping.
9051 * @param fAccess The kind of access.
9052 */
9053IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9054{
9055 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9056 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9057
9058 /* If it's bounce buffered, we may need to write back the buffer. */
9059 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9060 {
9061 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9062 {
9063 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9064 if (rcStrict == VINF_SUCCESS)
9065 return;
9066 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9067 }
9068 }
9069 /* Otherwise unlock it. */
9070 else
9071 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9072
9073 /* Free the entry. */
9074 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9075 Assert(pVCpu->iem.s.cActiveMappings != 0);
9076 pVCpu->iem.s.cActiveMappings--;
9077}
9078
9079#endif
9080
9081#ifndef IN_RING3
9082/**
9083 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9084 * buffer part shows trouble, it will be postponed to ring-3 (sets FF and stuff).
9085 *
9086 * Allows the instruction to be completed and retired, while the IEM user will
9087 * return to ring-3 immediately afterwards and do the postponed writes there.
9088 *
9089 * @returns VBox status code (no strict statuses). Caller must check
9090 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9092 * @param pvMem The mapping.
9093 * @param fAccess The kind of access.
9094 */
9095IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9096{
9097 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9098 AssertReturn(iMemMap >= 0, iMemMap);
9099
9100 /* If it's bounce buffered, we may need to write back the buffer. */
9101 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9102 {
9103 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9104 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9105 }
9106 /* Otherwise unlock it. */
9107 else
9108 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9109
9110 /* Free the entry. */
9111 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9112 Assert(pVCpu->iem.s.cActiveMappings != 0);
9113 pVCpu->iem.s.cActiveMappings--;
9114 return VINF_SUCCESS;
9115}
9116#endif
9117
9118
9119/**
9120 * Rolls back mappings, releasing page locks and such.
9121 *
9122 * The caller shall only call this after checking cActiveMappings.
9123 *
9125 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9126 */
9127IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9128{
9129 Assert(pVCpu->iem.s.cActiveMappings > 0);
9130
9131 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9132 while (iMemMap-- > 0)
9133 {
9134 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9135 if (fAccess != IEM_ACCESS_INVALID)
9136 {
9137 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9138 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9139 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9140 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9141 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9142 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9143 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9144 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9145 pVCpu->iem.s.cActiveMappings--;
9146 }
9147 }
9148}
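/*
 * Illustrative caller pattern (hypothetical surrounding code) for the
 * cActiveMappings guard mentioned above:
 *
 *      VBOXSTRICTRC rcStrict = <execute or decode the instruction>;
 *      if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);
 */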
9149
9150
9151/**
9152 * Fetches a data byte.
9153 *
9154 * @returns Strict VBox status code.
9155 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9156 * @param pu8Dst Where to return the byte.
9157 * @param iSegReg The index of the segment register to use for
9158 * this access. The base and limits are checked.
9159 * @param GCPtrMem The address of the guest memory.
9160 */
9161IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9162{
9163 /* The lazy approach for now... */
9164 uint8_t const *pu8Src;
9165 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9166 if (rc == VINF_SUCCESS)
9167 {
9168 *pu8Dst = *pu8Src;
9169 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9170 }
9171 return rc;
9172}
9173
9174
9175#ifdef IEM_WITH_SETJMP
9176/**
9177 * Fetches a data byte, longjmp on error.
9178 *
9179 * @returns The byte.
9180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9181 * @param iSegReg The index of the segment register to use for
9182 * this access. The base and limits are checked.
9183 * @param GCPtrMem The address of the guest memory.
9184 */
9185DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9186{
9187 /* The lazy approach for now... */
9188 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9189 uint8_t const bRet = *pu8Src;
9190 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9191 return bRet;
9192}
9193#endif /* IEM_WITH_SETJMP */
9194
9195
9196/**
9197 * Fetches a data word.
9198 *
9199 * @returns Strict VBox status code.
9200 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9201 * @param pu16Dst Where to return the word.
9202 * @param iSegReg The index of the segment register to use for
9203 * this access. The base and limits are checked.
9204 * @param GCPtrMem The address of the guest memory.
9205 */
9206IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9207{
9208 /* The lazy approach for now... */
9209 uint16_t const *pu16Src;
9210 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9211 if (rc == VINF_SUCCESS)
9212 {
9213 *pu16Dst = *pu16Src;
9214 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9215 }
9216 return rc;
9217}
9218
9219
9220#ifdef IEM_WITH_SETJMP
9221/**
9222 * Fetches a data word, longjmp on error.
9223 *
9224 * @returns The word.
9225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9226 * @param iSegReg The index of the segment register to use for
9227 * this access. The base and limits are checked.
9228 * @param GCPtrMem The address of the guest memory.
9229 */
9230DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9231{
9232 /* The lazy approach for now... */
9233 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9234 uint16_t const u16Ret = *pu16Src;
9235 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9236 return u16Ret;
9237}
9238#endif
9239
9240
9241/**
9242 * Fetches a data dword.
9243 *
9244 * @returns Strict VBox status code.
9245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9246 * @param pu32Dst Where to return the dword.
9247 * @param iSegReg The index of the segment register to use for
9248 * this access. The base and limits are checked.
9249 * @param GCPtrMem The address of the guest memory.
9250 */
9251IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9252{
9253 /* The lazy approach for now... */
9254 uint32_t const *pu32Src;
9255 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9256 if (rc == VINF_SUCCESS)
9257 {
9258 *pu32Dst = *pu32Src;
9259 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9260 }
9261 return rc;
9262}
9263
9264
9265#ifdef IEM_WITH_SETJMP
9266
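/**
 * Applies segmentation to a data read address, longjmp on error.
 *
 * @returns The linear address to use for the access.
 * @param   pVCpu      The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg    The index of the segment register to use for this
 *                     access.  The base and limits are checked.
 * @param   cbMem      The access size.
 * @param   GCPtrMem   The address of the guest memory.
 */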
9267IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9268{
9269 Assert(cbMem >= 1);
9270 Assert(iSegReg < X86_SREG_COUNT);
9271
9272 /*
9273 * 64-bit mode is simpler.
9274 */
9275 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9276 {
9277 if (iSegReg >= X86_SREG_FS)
9278 {
9279 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9280 GCPtrMem += pSel->u64Base;
9281 }
9282
9283 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9284 return GCPtrMem;
9285 }
9286 /*
9287 * 16-bit and 32-bit segmentation.
9288 */
9289 else
9290 {
9291 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9292 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9293 == X86DESCATTR_P /* data, expand up */
9294 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9295 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9296 {
9297 /* expand up */
9298 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9299 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9300 && GCPtrLast32 > (uint32_t)GCPtrMem))
9301 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9302 }
9303 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9304 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9305 {
9306 /* expand down */
9307 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9308 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9309 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9310 && GCPtrLast32 > (uint32_t)GCPtrMem))
9311 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9312 }
9313 else
9314 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9315 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9316 }
9317 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9318}
9319
9320
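/**
 * Applies segmentation to a data write address, longjmp on error.
 *
 * @returns The linear address to use for the access.
 * @param   pVCpu      The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg    The index of the segment register to use for this
 *                     access.  The base and limits are checked.
 * @param   cbMem      The access size.
 * @param   GCPtrMem   The address of the guest memory.
 */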
9321IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9322{
9323 Assert(cbMem >= 1);
9324 Assert(iSegReg < X86_SREG_COUNT);
9325
9326 /*
9327 * 64-bit mode is simpler.
9328 */
9329 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9330 {
9331 if (iSegReg >= X86_SREG_FS)
9332 {
9333 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9334 GCPtrMem += pSel->u64Base;
9335 }
9336
9337 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9338 return GCPtrMem;
9339 }
9340 /*
9341 * 16-bit and 32-bit segmentation.
9342 */
9343 else
9344 {
9345 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9346 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9347 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9348 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9349 {
9350 /* expand up */
9351 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9352 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9353 && GCPtrLast32 > (uint32_t)GCPtrMem))
9354 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9355 }
9356        else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9357 {
9358 /* expand down */
9359 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9360 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9361 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9362 && GCPtrLast32 > (uint32_t)GCPtrMem))
9363 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9364 }
9365 else
9366 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9367 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9368 }
9369 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9370}
9371
9372
9373/**
9374 * Fetches a data dword, longjmp on error, fallback/safe version.
9375 *
9376 * @returns The dword.
9377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9378 * @param iSegReg The index of the segment register to use for
9379 * this access. The base and limits are checked.
9380 * @param GCPtrMem The address of the guest memory.
9381 */
9382IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9383{
9384 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9385 uint32_t const u32Ret = *pu32Src;
9386 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9387 return u32Ret;
9388}
9389
9390
9391/**
9392 * Fetches a data dword, longjmp on error.
9393 *
9394 * @returns The dword.
9395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9396 * @param iSegReg The index of the segment register to use for
9397 * this access. The base and limits are checked.
9398 * @param GCPtrMem The address of the guest memory.
9399 */
9400DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9401{
9402# ifdef IEM_WITH_DATA_TLB
9403 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9404 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9405 {
9406 /// @todo more later.
9407 }
9408
9409 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9410# else
9411 /* The lazy approach. */
9412 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9413 uint32_t const u32Ret = *pu32Src;
9414 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9415 return u32Ret;
9416# endif
9417}
9418#endif
9419
9420
9421#ifdef SOME_UNUSED_FUNCTION
9422/**
9423 * Fetches a data dword and sign extends it to a qword.
9424 *
9425 * @returns Strict VBox status code.
9426 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9427 * @param pu64Dst Where to return the sign extended value.
9428 * @param iSegReg The index of the segment register to use for
9429 * this access. The base and limits are checked.
9430 * @param GCPtrMem The address of the guest memory.
9431 */
9432IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9433{
9434 /* The lazy approach for now... */
9435 int32_t const *pi32Src;
9436 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9437 if (rc == VINF_SUCCESS)
9438 {
9439 *pu64Dst = *pi32Src;
9440 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9441 }
9442#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9443 else
9444 *pu64Dst = 0;
9445#endif
9446 return rc;
9447}
9448#endif
9449
9450
9451/**
9452 * Fetches a data qword.
9453 *
9454 * @returns Strict VBox status code.
9455 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9456 * @param pu64Dst Where to return the qword.
9457 * @param iSegReg The index of the segment register to use for
9458 * this access. The base and limits are checked.
9459 * @param GCPtrMem The address of the guest memory.
9460 */
9461IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9462{
9463 /* The lazy approach for now... */
9464 uint64_t const *pu64Src;
9465 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9466 if (rc == VINF_SUCCESS)
9467 {
9468 *pu64Dst = *pu64Src;
9469 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9470 }
9471 return rc;
9472}
9473
9474
9475#ifdef IEM_WITH_SETJMP
9476/**
9477 * Fetches a data qword, longjmp on error.
9478 *
9479 * @returns The qword.
9480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9481 * @param iSegReg The index of the segment register to use for
9482 * this access. The base and limits are checked.
9483 * @param GCPtrMem The address of the guest memory.
9484 */
9485DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9486{
9487 /* The lazy approach for now... */
9488 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9489 uint64_t const u64Ret = *pu64Src;
9490 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9491 return u64Ret;
9492}
9493#endif
9494
9495
9496/**
9497 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9498 *
9499 * @returns Strict VBox status code.
9500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9501 * @param pu64Dst Where to return the qword.
9502 * @param iSegReg The index of the segment register to use for
9503 * this access. The base and limits are checked.
9504 * @param GCPtrMem The address of the guest memory.
9505 */
9506IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9507{
9508 /* The lazy approach for now... */
9509 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9510 if (RT_UNLIKELY(GCPtrMem & 15))
9511 return iemRaiseGeneralProtectionFault0(pVCpu);
9512
9513 uint64_t const *pu64Src;
9514 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9515 if (rc == VINF_SUCCESS)
9516 {
9517 *pu64Dst = *pu64Src;
9518 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9519 }
9520 return rc;
9521}
9522
9523
9524#ifdef IEM_WITH_SETJMP
9525/**
9526 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9527 *
9528 * @returns The qword.
9529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9530 * @param iSegReg The index of the segment register to use for
9531 * this access. The base and limits are checked.
9532 * @param GCPtrMem The address of the guest memory.
9533 */
9534DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9535{
9536 /* The lazy approach for now... */
9537 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9538 if (RT_LIKELY(!(GCPtrMem & 15)))
9539 {
9540 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9541 uint64_t const u64Ret = *pu64Src;
9542 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9543 return u64Ret;
9544 }
9545
9546 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9547 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9548}
9549#endif
9550
9551
9552/**
9553 * Fetches a data tword.
9554 *
9555 * @returns Strict VBox status code.
9556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9557 * @param pr80Dst Where to return the tword.
9558 * @param iSegReg The index of the segment register to use for
9559 * this access. The base and limits are checked.
9560 * @param GCPtrMem The address of the guest memory.
9561 */
9562IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9563{
9564 /* The lazy approach for now... */
9565 PCRTFLOAT80U pr80Src;
9566 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9567 if (rc == VINF_SUCCESS)
9568 {
9569 *pr80Dst = *pr80Src;
9570 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9571 }
9572 return rc;
9573}
9574
9575
9576#ifdef IEM_WITH_SETJMP
9577/**
9578 * Fetches a data tword, longjmp on error.
9579 *
9580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9581 * @param pr80Dst Where to return the tword.
9582 * @param iSegReg The index of the segment register to use for
9583 * this access. The base and limits are checked.
9584 * @param GCPtrMem The address of the guest memory.
9585 */
9586DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9587{
9588 /* The lazy approach for now... */
9589 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9590 *pr80Dst = *pr80Src;
9591 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9592}
9593#endif
9594
9595
9596/**
9597 * Fetches a data dqword (double qword), generally SSE related.
9598 *
9599 * @returns Strict VBox status code.
9600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9601 * @param pu128Dst Where to return the dqword.
9602 * @param iSegReg The index of the segment register to use for
9603 * this access. The base and limits are checked.
9604 * @param GCPtrMem The address of the guest memory.
9605 */
9606IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9607{
9608 /* The lazy approach for now... */
9609 PCRTUINT128U pu128Src;
9610 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9611 if (rc == VINF_SUCCESS)
9612 {
9613 pu128Dst->au64[0] = pu128Src->au64[0];
9614 pu128Dst->au64[1] = pu128Src->au64[1];
9615 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9616 }
9617 return rc;
9618}
9619
9620
9621#ifdef IEM_WITH_SETJMP
9622/**
9623 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9624 *
9625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9626 * @param pu128Dst Where to return the dqword.
9627 * @param iSegReg The index of the segment register to use for
9628 * this access. The base and limits are checked.
9629 * @param GCPtrMem The address of the guest memory.
9630 */
9631IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9632{
9633 /* The lazy approach for now... */
9634 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9635 pu128Dst->au64[0] = pu128Src->au64[0];
9636 pu128Dst->au64[1] = pu128Src->au64[1];
9637 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9638}
9639#endif
9640
9641
9642/**
9643 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9644 * related.
9645 *
9646 * Raises \#GP(0) if not aligned.
9647 *
9648 * @returns Strict VBox status code.
9649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9650 * @param pu128Dst Where to return the dqword.
9651 * @param iSegReg The index of the segment register to use for
9652 * this access. The base and limits are checked.
9653 * @param GCPtrMem The address of the guest memory.
9654 */
9655IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9656{
9657 /* The lazy approach for now... */
9658 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9659 if ( (GCPtrMem & 15)
9660 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9661 return iemRaiseGeneralProtectionFault0(pVCpu);
9662
9663 PCRTUINT128U pu128Src;
9664 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9665 if (rc == VINF_SUCCESS)
9666 {
9667 pu128Dst->au64[0] = pu128Src->au64[0];
9668 pu128Dst->au64[1] = pu128Src->au64[1];
9669 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9670 }
9671 return rc;
9672}
9673
9674
9675#ifdef IEM_WITH_SETJMP
9676/**
9677 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9678 * related, longjmp on error.
9679 *
9680 * Raises \#GP(0) if not aligned.
9681 *
9682 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9683 * @param pu128Dst Where to return the dqword.
9684 * @param iSegReg The index of the segment register to use for
9685 * this access. The base and limits are checked.
9686 * @param GCPtrMem The address of the guest memory.
9687 */
9688DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9689{
9690 /* The lazy approach for now... */
9691 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9692 if ( (GCPtrMem & 15) == 0
9693 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9694 {
9695 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9696 pu128Dst->au64[0] = pu128Src->au64[0];
9697 pu128Dst->au64[1] = pu128Src->au64[1];
9698 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9699 return;
9700 }
9701
9702 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9703 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9704}
9705#endif
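
/*
 * The alignment rule used by the two aligned 128-bit fetchers above, written
 * out on its own (added sketch, not original code): a misaligned 16 byte SSE
 * access only raises #GP(0) when MXCSR.MM (AMD's misaligned exception mask)
 * is clear.
 */
#if 0 /* sketch only, not built */
    bool const fAligned = !(GCPtrMem & 15);
    bool const fMmSet   = (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM) != 0;
    if (!fAligned && !fMmSet)
        return iemRaiseGeneralProtectionFault0(pVCpu); /* longjmp in the Jmp variant */
#endif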
9706
9707
9708/**
9709 * Fetches a data oword (octo word), generally AVX related.
9710 *
9711 * @returns Strict VBox status code.
9712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9713 * @param pu256Dst Where to return the oword.
9714 * @param iSegReg The index of the segment register to use for
9715 * this access. The base and limits are checked.
9716 * @param GCPtrMem The address of the guest memory.
9717 */
9718IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9719{
9720 /* The lazy approach for now... */
9721 PCRTUINT256U pu256Src;
9722 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9723 if (rc == VINF_SUCCESS)
9724 {
9725 pu256Dst->au64[0] = pu256Src->au64[0];
9726 pu256Dst->au64[1] = pu256Src->au64[1];
9727 pu256Dst->au64[2] = pu256Src->au64[2];
9728 pu256Dst->au64[3] = pu256Src->au64[3];
9729 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9730 }
9731 return rc;
9732}
9733
9734
9735#ifdef IEM_WITH_SETJMP
9736/**
9737 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9738 *
9739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9740 * @param pu256Dst Where to return the oword.
9741 * @param iSegReg The index of the segment register to use for
9742 * this access. The base and limits are checked.
9743 * @param GCPtrMem The address of the guest memory.
9744 */
9745IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9746{
9747 /* The lazy approach for now... */
9748 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9749 pu256Dst->au64[0] = pu256Src->au64[0];
9750 pu256Dst->au64[1] = pu256Src->au64[1];
9751 pu256Dst->au64[2] = pu256Src->au64[2];
9752 pu256Dst->au64[3] = pu256Src->au64[3];
9753 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9754}
9755#endif
9756
9757
9758/**
9759 * Fetches a data oword (octo word) at an aligned address, generally AVX
9760 * related.
9761 *
9762 * Raises \#GP(0) if not aligned.
9763 *
9764 * @returns Strict VBox status code.
9765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9766 * @param pu256Dst Where to return the oword.
9767 * @param iSegReg The index of the segment register to use for
9768 * this access. The base and limits are checked.
9769 * @param GCPtrMem The address of the guest memory.
9770 */
9771IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9772{
9773 /* The lazy approach for now... */
9774 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9775 if (GCPtrMem & 31)
9776 return iemRaiseGeneralProtectionFault0(pVCpu);
9777
9778 PCRTUINT256U pu256Src;
9779 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9780 if (rc == VINF_SUCCESS)
9781 {
9782 pu256Dst->au64[0] = pu256Src->au64[0];
9783 pu256Dst->au64[1] = pu256Src->au64[1];
9784 pu256Dst->au64[2] = pu256Src->au64[2];
9785 pu256Dst->au64[3] = pu256Src->au64[3];
9786 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9787 }
9788 return rc;
9789}
9790
9791
9792#ifdef IEM_WITH_SETJMP
9793/**
9794 * Fetches a data oword (octo word) at an aligned address, generally AVX
9795 * related, longjmp on error.
9796 *
9797 * Raises \#GP(0) if not aligned.
9798 *
9799 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9800 * @param pu256Dst Where to return the oword.
9801 * @param iSegReg The index of the segment register to use for
9802 * this access. The base and limits are checked.
9803 * @param GCPtrMem The address of the guest memory.
9804 */
9805DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9806{
9807 /* The lazy approach for now... */
9808 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9809 if ((GCPtrMem & 31) == 0)
9810 {
9811 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9812 pu256Dst->au64[0] = pu256Src->au64[0];
9813 pu256Dst->au64[1] = pu256Src->au64[1];
9814 pu256Dst->au64[2] = pu256Src->au64[2];
9815 pu256Dst->au64[3] = pu256Src->au64[3];
9816 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9817 return;
9818 }
9819
9820 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9821 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9822}
9823#endif
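
/*
 * Note (added, not original code): unlike the 16 byte SSE fetchers above, the
 * 32 byte fetchers grant no MXCSR.MM exemption; the operand must simply be
 * 32 byte aligned:
 */
#if 0 /* sketch only, not built */
    if (GCPtrMem & 31)
        return iemRaiseGeneralProtectionFault0(pVCpu); /* longjmp in the Jmp variant */
#endif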
9824
9825
9826
9827/**
9828 * Fetches a descriptor register (lgdt, lidt).
9829 *
9830 * @returns Strict VBox status code.
9831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9832 * @param pcbLimit Where to return the limit.
9833 * @param pGCPtrBase Where to return the base.
9834 * @param iSegReg The index of the segment register to use for
9835 * this access. The base and limits are checked.
9836 * @param GCPtrMem The address of the guest memory.
9837 * @param enmOpSize The effective operand size.
9838 */
9839IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9840 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9841{
9842 /*
9843 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9844 * little special:
9845 * - The two reads are done separately.
9846 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9847 * - We suspect that the 386 actually commits the limit before the base in
9848 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9849 * don't try to emulate this eccentric behavior because it is not well
9850 * enough understood and rather hard to trigger.
9851 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9852 */
9853 VBOXSTRICTRC rcStrict;
9854 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9855 {
9856 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9857 if (rcStrict == VINF_SUCCESS)
9858 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9859 }
9860 else
9861 {
9862 uint32_t uTmp = 0; /* (Visual C++ thinks it may be used uninitialized otherwise) */
9863 if (enmOpSize == IEMMODE_32BIT)
9864 {
9865 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9866 {
9867 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9868 if (rcStrict == VINF_SUCCESS)
9869 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9870 }
9871 else
9872 {
9873 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9874 if (rcStrict == VINF_SUCCESS)
9875 {
9876 *pcbLimit = (uint16_t)uTmp;
9877 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9878 }
9879 }
9880 if (rcStrict == VINF_SUCCESS)
9881 *pGCPtrBase = uTmp;
9882 }
9883 else
9884 {
9885 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9886 if (rcStrict == VINF_SUCCESS)
9887 {
9888 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9889 if (rcStrict == VINF_SUCCESS)
9890 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9891 }
9892 }
9893 }
9894 return rcStrict;
9895}
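
/*
 * Illustrative layout of the memory operand parsed by iemMemFetchDataXdtr
 * above (added sketch with a hypothetical type name, not original code): the
 * usual LGDT/LIDT pseudo descriptor, a 16-bit limit followed by the base.
 */
#if 0 /* sketch only, not built */
#pragma pack(1)
typedef struct EXAMPLEXDTROPERAND
{
    uint16_t cbLimit;   /* fetched first, at GCPtrMem */
    uint64_t uBase;     /* fetched second, at GCPtrMem + 2: 24 bits are used
                           with a 16-bit operand size, 32 bits with a 32-bit
                           operand size, all 64 bits in 64-bit mode */
} EXAMPLEXDTROPERAND;
#pragma pack()
#endif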
9896
9897
9898
9899/**
9900 * Stores a data byte.
9901 *
9902 * @returns Strict VBox status code.
9903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9904 * @param iSegReg The index of the segment register to use for
9905 * this access. The base and limits are checked.
9906 * @param GCPtrMem The address of the guest memory.
9907 * @param u8Value The value to store.
9908 */
9909IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9910{
9911 /* The lazy approach for now... */
9912 uint8_t *pu8Dst;
9913 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9914 if (rc == VINF_SUCCESS)
9915 {
9916 *pu8Dst = u8Value;
9917 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9918 }
9919 return rc;
9920}
9921
9922
9923#ifdef IEM_WITH_SETJMP
9924/**
9925 * Stores a data byte, longjmp on error.
9926 *
9927 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9928 * @param iSegReg The index of the segment register to use for
9929 * this access. The base and limits are checked.
9930 * @param GCPtrMem The address of the guest memory.
9931 * @param u8Value The value to store.
9932 */
9933IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9934{
9935 /* The lazy approach for now... */
9936 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9937 *pu8Dst = u8Value;
9938 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9939}
9940#endif
9941
9942
9943/**
9944 * Stores a data word.
9945 *
9946 * @returns Strict VBox status code.
9947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9948 * @param iSegReg The index of the segment register to use for
9949 * this access. The base and limits are checked.
9950 * @param GCPtrMem The address of the guest memory.
9951 * @param u16Value The value to store.
9952 */
9953IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9954{
9955 /* The lazy approach for now... */
9956 uint16_t *pu16Dst;
9957 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9958 if (rc == VINF_SUCCESS)
9959 {
9960 *pu16Dst = u16Value;
9961 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9962 }
9963 return rc;
9964}
9965
9966
9967#ifdef IEM_WITH_SETJMP
9968/**
9969 * Stores a data word, longjmp on error.
9970 *
9971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9972 * @param iSegReg The index of the segment register to use for
9973 * this access. The base and limits are checked.
9974 * @param GCPtrMem The address of the guest memory.
9975 * @param u16Value The value to store.
9976 */
9977IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9978{
9979 /* The lazy approach for now... */
9980 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9981 *pu16Dst = u16Value;
9982 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9983}
9984#endif
9985
9986
9987/**
9988 * Stores a data dword.
9989 *
9990 * @returns Strict VBox status code.
9991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9992 * @param iSegReg The index of the segment register to use for
9993 * this access. The base and limits are checked.
9994 * @param GCPtrMem The address of the guest memory.
9995 * @param u32Value The value to store.
9996 */
9997IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9998{
9999 /* The lazy approach for now... */
10000 uint32_t *pu32Dst;
10001 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10002 if (rc == VINF_SUCCESS)
10003 {
10004 *pu32Dst = u32Value;
10005 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10006 }
10007 return rc;
10008}
10009
10010
10011#ifdef IEM_WITH_SETJMP
10012/**
10013 * Stores a data dword, longjmp on error.
10014 *
10016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10017 * @param iSegReg The index of the segment register to use for
10018 * this access. The base and limits are checked.
10019 * @param GCPtrMem The address of the guest memory.
10020 * @param u32Value The value to store.
10021 */
10022IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10023{
10024 /* The lazy approach for now... */
10025 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10026 *pu32Dst = u32Value;
10027 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10028}
10029#endif
10030
10031
10032/**
10033 * Stores a data qword.
10034 *
10035 * @returns Strict VBox status code.
10036 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10037 * @param iSegReg The index of the segment register to use for
10038 * this access. The base and limits are checked.
10039 * @param GCPtrMem The address of the guest memory.
10040 * @param u64Value The value to store.
10041 */
10042IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10043{
10044 /* The lazy approach for now... */
10045 uint64_t *pu64Dst;
10046 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10047 if (rc == VINF_SUCCESS)
10048 {
10049 *pu64Dst = u64Value;
10050 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10051 }
10052 return rc;
10053}
10054
10055
10056#ifdef IEM_WITH_SETJMP
10057/**
10058 * Stores a data qword, longjmp on error.
10059 *
10060 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10061 * @param iSegReg The index of the segment register to use for
10062 * this access. The base and limits are checked.
10063 * @param GCPtrMem The address of the guest memory.
10064 * @param u64Value The value to store.
10065 */
10066IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10067{
10068 /* The lazy approach for now... */
10069 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10070 *pu64Dst = u64Value;
10071 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10072}
10073#endif
10074
10075
10076/**
10077 * Stores a data dqword.
10078 *
10079 * @returns Strict VBox status code.
10080 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10081 * @param iSegReg The index of the segment register to use for
10082 * this access. The base and limits are checked.
10083 * @param GCPtrMem The address of the guest memory.
10084 * @param u128Value The value to store.
10085 */
10086IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10087{
10088 /* The lazy approach for now... */
10089 PRTUINT128U pu128Dst;
10090 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10091 if (rc == VINF_SUCCESS)
10092 {
10093 pu128Dst->au64[0] = u128Value.au64[0];
10094 pu128Dst->au64[1] = u128Value.au64[1];
10095 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10096 }
10097 return rc;
10098}
10099
10100
10101#ifdef IEM_WITH_SETJMP
10102/**
10103 * Stores a data dqword, longjmp on error.
10104 *
10105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10106 * @param iSegReg The index of the segment register to use for
10107 * this access. The base and limits are checked.
10108 * @param GCPtrMem The address of the guest memory.
10109 * @param u128Value The value to store.
10110 */
10111IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10112{
10113 /* The lazy approach for now... */
10114 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10115 pu128Dst->au64[0] = u128Value.au64[0];
10116 pu128Dst->au64[1] = u128Value.au64[1];
10117 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10118}
10119#endif
10120
10121
10122/**
10123 * Stores a data dqword, SSE aligned.
10124 *
10125 * @returns Strict VBox status code.
10126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10127 * @param iSegReg The index of the segment register to use for
10128 * this access. The base and limits are checked.
10129 * @param GCPtrMem The address of the guest memory.
10130 * @param u128Value The value to store.
10131 */
10132IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10133{
10134 /* The lazy approach for now... */
10135 if ( (GCPtrMem & 15)
10136 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10137 return iemRaiseGeneralProtectionFault0(pVCpu);
10138
10139 PRTUINT128U pu128Dst;
10140 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10141 if (rc == VINF_SUCCESS)
10142 {
10143 pu128Dst->au64[0] = u128Value.au64[0];
10144 pu128Dst->au64[1] = u128Value.au64[1];
10145 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10146 }
10147 return rc;
10148}
10149
10150
10151#ifdef IEM_WITH_SETJMP
10152/**
10153 * Stores a data dqword, SSE aligned, longjmp on error.
10154 *
10156 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10157 * @param iSegReg The index of the segment register to use for
10158 * this access. The base and limits are checked.
10159 * @param GCPtrMem The address of the guest memory.
10160 * @param u128Value The value to store.
10161 */
10162DECL_NO_INLINE(IEM_STATIC, void)
10163iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10164{
10165 /* The lazy approach for now... */
10166 if ( (GCPtrMem & 15) == 0
10167 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10168 {
10169 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10170 pu128Dst->au64[0] = u128Value.au64[0];
10171 pu128Dst->au64[1] = u128Value.au64[1];
10172 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10173 return;
10174 }
10175
10176 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10177 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10178}
10179#endif
10180
10181
10182/**
10183 * Stores a data oword (octo word).
10184 *
10185 * @returns Strict VBox status code.
10186 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10187 * @param iSegReg The index of the segment register to use for
10188 * this access. The base and limits are checked.
10189 * @param GCPtrMem The address of the guest memory.
10190 * @param pu256Value Pointer to the value to store.
10191 */
10192IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10193{
10194 /* The lazy approach for now... */
10195 PRTUINT256U pu256Dst;
10196 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10197 if (rc == VINF_SUCCESS)
10198 {
10199 pu256Dst->au64[0] = pu256Value->au64[0];
10200 pu256Dst->au64[1] = pu256Value->au64[1];
10201 pu256Dst->au64[2] = pu256Value->au64[2];
10202 pu256Dst->au64[3] = pu256Value->au64[3];
10203 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10204 }
10205 return rc;
10206}
10207
10208
10209#ifdef IEM_WITH_SETJMP
10210/**
10211 * Stores a data oword (octo word), longjmp on error.
10212 *
10213 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10214 * @param iSegReg The index of the segment register to use for
10215 * this access. The base and limits are checked.
10216 * @param GCPtrMem The address of the guest memory.
10217 * @param pu256Value Pointer to the value to store.
10218 */
10219IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10220{
10221 /* The lazy approach for now... */
10222 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10223 pu256Dst->au64[0] = pu256Value->au64[0];
10224 pu256Dst->au64[1] = pu256Value->au64[1];
10225 pu256Dst->au64[2] = pu256Value->au64[2];
10226 pu256Dst->au64[3] = pu256Value->au64[3];
10227 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10228}
10229#endif
10230
10231
10232/**
10233 * Stores a data oword (octo word), AVX aligned.
10234 *
10235 * @returns Strict VBox status code.
10236 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10237 * @param iSegReg The index of the segment register to use for
10238 * this access. The base and limits are checked.
10239 * @param GCPtrMem The address of the guest memory.
10240 * @param pu256Value Pointer to the value to store.
10241 */
10242IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10243{
10244 /* The lazy approach for now... */
10245 if (GCPtrMem & 31)
10246 return iemRaiseGeneralProtectionFault0(pVCpu);
10247
10248 PRTUINT256U pu256Dst;
10249 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10250 if (rc == VINF_SUCCESS)
10251 {
10252 pu256Dst->au64[0] = pu256Value->au64[0];
10253 pu256Dst->au64[1] = pu256Value->au64[1];
10254 pu256Dst->au64[2] = pu256Value->au64[2];
10255 pu256Dst->au64[3] = pu256Value->au64[3];
10256 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10257 }
10258 return rc;
10259}
10260
10261
10262#ifdef IEM_WITH_SETJMP
10263/**
10264 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10265 *
10267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10268 * @param iSegReg The index of the segment register to use for
10269 * this access. The base and limits are checked.
10270 * @param GCPtrMem The address of the guest memory.
10271 * @param pu256Value Pointer to the value to store.
10272 */
10273DECL_NO_INLINE(IEM_STATIC, void)
10274iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10275{
10276 /* The lazy approach for now... */
10277 if ((GCPtrMem & 31) == 0)
10278 {
10279 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10280 pu256Dst->au64[0] = pu256Value->au64[0];
10281 pu256Dst->au64[1] = pu256Value->au64[1];
10282 pu256Dst->au64[2] = pu256Value->au64[2];
10283 pu256Dst->au64[3] = pu256Value->au64[3];
10284 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10285 return;
10286 }
10287
10288 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10289 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10290}
10291#endif
10292
10293
10294/**
10295 * Stores a descriptor register (sgdt, sidt).
10296 *
10297 * @returns Strict VBox status code.
10298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10299 * @param cbLimit The limit.
10300 * @param GCPtrBase The base address.
10301 * @param iSegReg The index of the segment register to use for
10302 * this access. The base and limits are checked.
10303 * @param GCPtrMem The address of the guest memory.
10304 */
10305IEM_STATIC VBOXSTRICTRC
10306iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10307{
10308 /*
10309 * The SIDT and SGDT instructions actually store the data using two
10310 * independent writes. The instructions do not respond to opsize prefixes.
10311 */
10312 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10313 if (rcStrict == VINF_SUCCESS)
10314 {
10315 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10316 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10317 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10318 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10319 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10320 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10321 else
10322 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10323 }
10324 return rcStrict;
10325}
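
/*
 * Summary of the branches above (descriptive note, not original code): after
 * the 16-bit limit at GCPtrMem, the base is stored at GCPtrMem + 2 as
 *   - base | 0xff000000 (dword) in 16-bit code on a 286 or older target CPU,
 *   - the 32-bit base (dword) in 16-bit or 32-bit code otherwise,
 *   - the full 64-bit base (qword) in 64-bit code.
 */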
10326
10327
10328/**
10329 * Pushes a word onto the stack.
10330 *
10331 * @returns Strict VBox status code.
10332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10333 * @param u16Value The value to push.
10334 */
10335IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10336{
10337 /* Decrement the stack pointer. */
10338 uint64_t uNewRsp;
10339 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10340 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
10341
10342 /* Write the word the lazy way. */
10343 uint16_t *pu16Dst;
10344 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10345 if (rc == VINF_SUCCESS)
10346 {
10347 *pu16Dst = u16Value;
10348 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10349 }
10350
10351 /* Commit the new RSP value unless an access handler made trouble. */
10352 if (rc == VINF_SUCCESS)
10353 pCtx->rsp = uNewRsp;
10354
10355 return rc;
10356}
10357
10358
10359/**
10360 * Pushes a dword onto the stack.
10361 *
10362 * @returns Strict VBox status code.
10363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10364 * @param u32Value The value to push.
10365 */
10366IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10367{
10368 /* Decrement the stack pointer. */
10369 uint64_t uNewRsp;
10370 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10371 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10372
10373 /* Write the dword the lazy way. */
10374 uint32_t *pu32Dst;
10375 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10376 if (rc == VINF_SUCCESS)
10377 {
10378 *pu32Dst = u32Value;
10379 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10380 }
10381
10382 /* Commit the new RSP value unless an access handler made trouble. */
10383 if (rc == VINF_SUCCESS)
10384 pCtx->rsp = uNewRsp;
10385
10386 return rc;
10387}
10388
10389
10390/**
10391 * Pushes a dword segment register value onto the stack.
10392 *
10393 * @returns Strict VBox status code.
10394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10395 * @param u32Value The value to push.
10396 */
10397IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10398{
10399 /* Decrement the stack pointer. */
10400 uint64_t uNewRsp;
10401 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10402 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10403
10404 VBOXSTRICTRC rc;
10405 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
10406 {
10407 /* The recompiler writes a full dword. */
10408 uint32_t *pu32Dst;
10409 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10410 if (rc == VINF_SUCCESS)
10411 {
10412 *pu32Dst = u32Value;
10413 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10414 }
10415 }
10416 else
10417 {
10418 /* The Intel docs talk about zero extending the selector register
10419 value. My actual Intel CPU here might be zero extending the value,
10420 but it still only writes the lower word... */
10421 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10422 * happens when crossing an electric page boundary, is the high word checked
10423 * for write accessibility or not? Probably it is. What about segment limits?
10424 * It appears this behavior is also shared with trap error codes.
10425 *
10426 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10427 * ancient hardware when it actually did change. */
10428 uint16_t *pu16Dst;
10429 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10430 if (rc == VINF_SUCCESS)
10431 {
10432 *pu16Dst = (uint16_t)u32Value;
10433 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10434 }
10435 }
10436
10437 /* Commit the new RSP value unless an access handler made trouble. */
10438 if (rc == VINF_SUCCESS)
10439 pCtx->rsp = uNewRsp;
10440
10441 return rc;
10442}
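
/*
 * Illustrative note on the non-verification path above (added sketch, not
 * original code): a 32-bit "push sreg" still reserves a full dword of stack,
 * but only the low word is written, e.g. with hypothetical values:
 *   before: ESP = 0x1000, FS = 0x0030
 *   after : ESP = 0x0ffc, word at SS:0x0ffc = 0x0030, the two bytes at
 *           SS:0x0ffe are left unchanged (though mapped for write).
 */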
10443
10444
10445/**
10446 * Pushes a qword onto the stack.
10447 *
10448 * @returns Strict VBox status code.
10449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10450 * @param u64Value The value to push.
10451 */
10452IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10453{
10454 /* Decrement the stack pointer. */
10455 uint64_t uNewRsp;
10456 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10457 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
10458
10459 /* Write the qword the lazy way. */
10460 uint64_t *pu64Dst;
10461 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10462 if (rc == VINF_SUCCESS)
10463 {
10464 *pu64Dst = u64Value;
10465 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10466 }
10467
10468 /* Commit the new RSP value unless an access handler made trouble. */
10469 if (rc == VINF_SUCCESS)
10470 pCtx->rsp = uNewRsp;
10471
10472 return rc;
10473}
10474
10475
10476/**
10477 * Pops a word from the stack.
10478 *
10479 * @returns Strict VBox status code.
10480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10481 * @param pu16Value Where to store the popped value.
10482 */
10483IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10484{
10485 /* Increment the stack pointer. */
10486 uint64_t uNewRsp;
10487 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10488 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10489
10490 /* Read the word the lazy way. */
10491 uint16_t const *pu16Src;
10492 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10493 if (rc == VINF_SUCCESS)
10494 {
10495 *pu16Value = *pu16Src;
10496 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10497
10498 /* Commit the new RSP value. */
10499 if (rc == VINF_SUCCESS)
10500 pCtx->rsp = uNewRsp;
10501 }
10502
10503 return rc;
10504}
10505
10506
10507/**
10508 * Pops a dword from the stack.
10509 *
10510 * @returns Strict VBox status code.
10511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10512 * @param pu32Value Where to store the popped value.
10513 */
10514IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10515{
10516 /* Increment the stack pointer. */
10517 uint64_t uNewRsp;
10518 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10519 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10520
10521 /* Read the dword the lazy way. */
10522 uint32_t const *pu32Src;
10523 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10524 if (rc == VINF_SUCCESS)
10525 {
10526 *pu32Value = *pu32Src;
10527 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10528
10529 /* Commit the new RSP value. */
10530 if (rc == VINF_SUCCESS)
10531 pCtx->rsp = uNewRsp;
10532 }
10533
10534 return rc;
10535}
10536
10537
10538/**
10539 * Pops a qword from the stack.
10540 *
10541 * @returns Strict VBox status code.
10542 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10543 * @param pu64Value Where to store the popped value.
10544 */
10545IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10546{
10547 /* Increment the stack pointer. */
10548 uint64_t uNewRsp;
10549 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10550 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10551
10552 /* Read the qword the lazy way. */
10553 uint64_t const *pu64Src;
10554 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10555 if (rc == VINF_SUCCESS)
10556 {
10557 *pu64Value = *pu64Src;
10558 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10559
10560 /* Commit the new RSP value. */
10561 if (rc == VINF_SUCCESS)
10562 pCtx->rsp = uNewRsp;
10563 }
10564
10565 return rc;
10566}
10567
10568
10569/**
10570 * Pushes a word onto the stack, using a temporary stack pointer.
10571 *
10572 * @returns Strict VBox status code.
10573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10574 * @param u16Value The value to push.
10575 * @param pTmpRsp Pointer to the temporary stack pointer.
10576 */
10577IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10578{
10579 /* Decrement the stack pointer. */
10580 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10581 RTUINT64U NewRsp = *pTmpRsp;
10582 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10583
10584 /* Write the word the lazy way. */
10585 uint16_t *pu16Dst;
10586 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10587 if (rc == VINF_SUCCESS)
10588 {
10589 *pu16Dst = u16Value;
10590 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10591 }
10592
10593 /* Commit the new RSP value unless an access handler made trouble. */
10594 if (rc == VINF_SUCCESS)
10595 *pTmpRsp = NewRsp;
10596
10597 return rc;
10598}
10599
10600
10601/**
10602 * Pushes a dword onto the stack, using a temporary stack pointer.
10603 *
10604 * @returns Strict VBox status code.
10605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10606 * @param u32Value The value to push.
10607 * @param pTmpRsp Pointer to the temporary stack pointer.
10608 */
10609IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10610{
10611 /* Decrement the stack pointer. */
10612 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10613 RTUINT64U NewRsp = *pTmpRsp;
10614 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10615
10616 /* Write the dword the lazy way. */
10617 uint32_t *pu32Dst;
10618 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10619 if (rc == VINF_SUCCESS)
10620 {
10621 *pu32Dst = u32Value;
10622 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10623 }
10624
10625 /* Commit the new RSP value unless an access handler made trouble. */
10626 if (rc == VINF_SUCCESS)
10627 *pTmpRsp = NewRsp;
10628
10629 return rc;
10630}
10631
10632
10633/**
10634 * Pushes a dword onto the stack, using a temporary stack pointer.
10635 *
10636 * @returns Strict VBox status code.
10637 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10638 * @param u64Value The value to push.
10639 * @param pTmpRsp Pointer to the temporary stack pointer.
10640 */
10641IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10642{
10643 /* Decrement the stack pointer. */
10644 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10645 RTUINT64U NewRsp = *pTmpRsp;
10646 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10647
10648 /* Write the qword the lazy way. */
10649 uint64_t *pu64Dst;
10650 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10651 if (rc == VINF_SUCCESS)
10652 {
10653 *pu64Dst = u64Value;
10654 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10655 }
10656
10657 /* Commit the new RSP value unless an access handler made trouble. */
10658 if (rc == VINF_SUCCESS)
10659 *pTmpRsp = NewRsp;
10660
10661 return rc;
10662}
10663
10664
10665/**
10666 * Pops a word from the stack, using a temporary stack pointer.
10667 *
10668 * @returns Strict VBox status code.
10669 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10670 * @param pu16Value Where to store the popped value.
10671 * @param pTmpRsp Pointer to the temporary stack pointer.
10672 */
10673IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10674{
10675 /* Increment the stack pointer. */
10676 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10677 RTUINT64U NewRsp = *pTmpRsp;
10678 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10679
10680 /* Read the word the lazy way. */
10681 uint16_t const *pu16Src;
10682 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10683 if (rc == VINF_SUCCESS)
10684 {
10685 *pu16Value = *pu16Src;
10686 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10687
10688 /* Commit the new RSP value. */
10689 if (rc == VINF_SUCCESS)
10690 *pTmpRsp = NewRsp;
10691 }
10692
10693 return rc;
10694}
10695
10696
10697/**
10698 * Pops a dword from the stack, using a temporary stack pointer.
10699 *
10700 * @returns Strict VBox status code.
10701 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10702 * @param pu32Value Where to store the popped value.
10703 * @param pTmpRsp Pointer to the temporary stack pointer.
10704 */
10705IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10706{
10707 /* Increment the stack pointer. */
10708 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10709 RTUINT64U NewRsp = *pTmpRsp;
10710 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10711
10712 /* Read the dword the lazy way. */
10713 uint32_t const *pu32Src;
10714 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10715 if (rc == VINF_SUCCESS)
10716 {
10717 *pu32Value = *pu32Src;
10718 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10719
10720 /* Commit the new RSP value. */
10721 if (rc == VINF_SUCCESS)
10722 *pTmpRsp = NewRsp;
10723 }
10724
10725 return rc;
10726}
10727
10728
10729/**
10730 * Pops a qword from the stack, using a temporary stack pointer.
10731 *
10732 * @returns Strict VBox status code.
10733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10734 * @param pu64Value Where to store the popped value.
10735 * @param pTmpRsp Pointer to the temporary stack pointer.
10736 */
10737IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10738{
10739 /* Increment the stack pointer. */
10740 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10741 RTUINT64U NewRsp = *pTmpRsp;
10742 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10743
10744 /* Read the qword the lazy way. */
10745 uint64_t const *pu64Src;
10746 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10747 if (rcStrict == VINF_SUCCESS)
10748 {
10749 *pu64Value = *pu64Src;
10750 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10751
10752 /* Commit the new RSP value. */
10753 if (rcStrict == VINF_SUCCESS)
10754 *pTmpRsp = NewRsp;
10755 }
10756
10757 return rcStrict;
10758}
10759
10760
10761/**
10762 * Begin a special stack push (used by interrupts, exceptions and such).
10763 *
10764 * This will raise \#SS or \#PF if appropriate.
10765 *
10766 * @returns Strict VBox status code.
10767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10768 * @param cbMem The number of bytes to push onto the stack.
10769 * @param ppvMem Where to return the pointer to the stack memory.
10770 * As with the other memory functions this could be
10771 * direct access or bounce buffered access, so
10772 * don't commit registers until the commit call
10773 * succeeds.
10774 * @param puNewRsp Where to return the new RSP value. This must be
10775 * passed unchanged to
10776 * iemMemStackPushCommitSpecial().
10777 */
10778IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10779{
10780 Assert(cbMem < UINT8_MAX);
10781 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10782 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10783 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10784}
10785
10786
10787/**
10788 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10789 *
10790 * This will update the rSP.
10791 *
10792 * @returns Strict VBox status code.
10793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10794 * @param pvMem The pointer returned by
10795 * iemMemStackPushBeginSpecial().
10796 * @param uNewRsp The new RSP value returned by
10797 * iemMemStackPushBeginSpecial().
10798 */
10799IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10800{
10801 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10802 if (rcStrict == VINF_SUCCESS)
10803 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10804 return rcStrict;
10805}
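
/*
 * Usage sketch for the special push API above (added example with hypothetical
 * frame values, not original code): begin, fill in the frame, then commit;
 * RSP is only updated if the commit succeeds.
 */
#if 0 /* sketch only, not built */
    uint16_t    *pu16Frame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pu16Frame[2] = 0x0202;  /* e.g. FLAGS (hypothetical) */
    pu16Frame[1] = 0xf000;  /* e.g. CS    (hypothetical) */
    pu16Frame[0] = 0x1234;  /* e.g. IP    (hypothetical) */
    rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
#endif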
10806
10807
10808/**
10809 * Begin a special stack pop (used by iret, retf and such).
10810 *
10811 * This will raise \#SS or \#PF if appropriate.
10812 *
10813 * @returns Strict VBox status code.
10814 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10815 * @param cbMem The number of bytes to pop from the stack.
10816 * @param ppvMem Where to return the pointer to the stack memory.
10817 * @param puNewRsp Where to return the new RSP value. This must be
10818 * assigned to CPUMCTX::rsp manually some time
10819 * after iemMemStackPopDoneSpecial() has been
10820 * called.
10821 */
10822IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10823{
10824 Assert(cbMem < UINT8_MAX);
10825 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10826 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10827 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10828}
10829
10830
10831/**
10832 * Continue a special stack pop (used by iret and retf).
10833 *
10834 * This will raise \#SS or \#PF if appropriate.
10835 *
10836 * @returns Strict VBox status code.
10837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10838 * @param cbMem The number of bytes to pop from the stack.
10839 * @param ppvMem Where to return the pointer to the stack memory.
10840 * @param puNewRsp Where to return the new RSP value. This must be
10841 * assigned to CPUMCTX::rsp manually some time
10842 * after iemMemStackPopDoneSpecial() has been
10843 * called.
10844 */
10845IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10846{
10847 Assert(cbMem < UINT8_MAX);
10848 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10849 RTUINT64U NewRsp;
10850 NewRsp.u = *puNewRsp;
10851 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10852 *puNewRsp = NewRsp.u;
10853 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10854}
10855
10856
10857/**
10858 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10859 * iemMemStackPopContinueSpecial).
10860 *
10861 * The caller will manually commit the rSP.
10862 *
10863 * @returns Strict VBox status code.
10864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10865 * @param pvMem The pointer returned by
10866 * iemMemStackPopBeginSpecial() or
10867 * iemMemStackPopContinueSpecial().
10868 */
10869IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10870{
10871 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10872}
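
/*
 * Usage sketch for the special pop API above (added example with a
 * hypothetical frame layout, not original code): begin, read, done, and then
 * commit the new RSP manually as the documentation requires.
 */
#if 0 /* sketch only, not built */
    uint16_t const *pu16Frame;
    uint64_t        uNewRsp;
    VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, (void const **)&pu16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint16_t const uNewIp    = pu16Frame[0];
    uint16_t const uNewCs    = pu16Frame[1];
    uint16_t const uNewFlags = pu16Frame[2];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu16Frame);
    if (rcStrict == VINF_SUCCESS)
        IEM_GET_CTX(pVCpu)->rsp = uNewRsp; /* committed manually, after Done */
#endif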
10873
10874
10875/**
10876 * Fetches a system table byte.
10877 *
10878 * @returns Strict VBox status code.
10879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10880 * @param pbDst Where to return the byte.
10881 * @param iSegReg The index of the segment register to use for
10882 * this access. The base and limits are checked.
10883 * @param GCPtrMem The address of the guest memory.
10884 */
10885IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10886{
10887 /* The lazy approach for now... */
10888 uint8_t const *pbSrc;
10889 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10890 if (rc == VINF_SUCCESS)
10891 {
10892 *pbDst = *pbSrc;
10893 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10894 }
10895 return rc;
10896}
10897
10898
10899/**
10900 * Fetches a system table word.
10901 *
10902 * @returns Strict VBox status code.
10903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10904 * @param pu16Dst Where to return the word.
10905 * @param iSegReg The index of the segment register to use for
10906 * this access. The base and limits are checked.
10907 * @param GCPtrMem The address of the guest memory.
10908 */
10909IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10910{
10911 /* The lazy approach for now... */
10912 uint16_t const *pu16Src;
10913 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10914 if (rc == VINF_SUCCESS)
10915 {
10916 *pu16Dst = *pu16Src;
10917 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10918 }
10919 return rc;
10920}
10921
10922
10923/**
10924 * Fetches a system table dword.
10925 *
10926 * @returns Strict VBox status code.
10927 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10928 * @param pu32Dst Where to return the dword.
10929 * @param iSegReg The index of the segment register to use for
10930 * this access. The base and limits are checked.
10931 * @param GCPtrMem The address of the guest memory.
10932 */
10933IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10934{
10935 /* The lazy approach for now... */
10936 uint32_t const *pu32Src;
10937 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10938 if (rc == VINF_SUCCESS)
10939 {
10940 *pu32Dst = *pu32Src;
10941 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10942 }
10943 return rc;
10944}
10945
10946
10947/**
10948 * Fetches a system table qword.
10949 *
10950 * @returns Strict VBox status code.
10951 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10952 * @param pu64Dst Where to return the qword.
10953 * @param iSegReg The index of the segment register to use for
10954 * this access. The base and limits are checked.
10955 * @param GCPtrMem The address of the guest memory.
10956 */
10957IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10958{
10959 /* The lazy approach for now... */
10960 uint64_t const *pu64Src;
10961 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10962 if (rc == VINF_SUCCESS)
10963 {
10964 *pu64Dst = *pu64Src;
10965 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10966 }
10967 return rc;
10968}
10969
10970
10971/**
10972 * Fetches a descriptor table entry with caller specified error code.
10973 *
10974 * @returns Strict VBox status code.
10975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10976 * @param pDesc Where to return the descriptor table entry.
10977 * @param uSel The selector which table entry to fetch.
10978 * @param uXcpt The exception to raise on table lookup error.
10979 * @param uErrorCode The error code associated with the exception.
10980 */
10981IEM_STATIC VBOXSTRICTRC
10982iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10983{
10984 AssertPtr(pDesc);
10985 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10986
10987 /** @todo did the 286 require all 8 bytes to be accessible? */
10988 /*
10989 * Get the selector table base and check bounds.
10990 */
10991 RTGCPTR GCPtrBase;
10992 if (uSel & X86_SEL_LDT)
10993 {
10994 if ( !pCtx->ldtr.Attr.n.u1Present
10995 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10996 {
10997 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10998 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10999 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11000 uErrorCode, 0);
11001 }
11002
11003 Assert(pCtx->ldtr.Attr.n.u1Present);
11004 GCPtrBase = pCtx->ldtr.u64Base;
11005 }
11006 else
11007 {
11008 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
11009 {
11010 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
11011 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11012 uErrorCode, 0);
11013 }
11014 GCPtrBase = pCtx->gdtr.pGdt;
11015 }
11016
11017 /*
11018 * Read the legacy descriptor and maybe the long mode extensions if
11019 * required.
11020 */
11021 VBOXSTRICTRC rcStrict;
11022 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11023 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11024 else
11025 {
11026 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11027 if (rcStrict == VINF_SUCCESS)
11028 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11029 if (rcStrict == VINF_SUCCESS)
11030 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11031 if (rcStrict == VINF_SUCCESS)
11032 pDesc->Legacy.au16[3] = 0;
11033 else
11034 return rcStrict;
11035 }
11036
11037 if (rcStrict == VINF_SUCCESS)
11038 {
11039 if ( !IEM_IS_LONG_MODE(pVCpu)
11040 || pDesc->Legacy.Gen.u1DescType)
11041 pDesc->Long.au64[1] = 0;
11042 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
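        /* Note: (uSel | X86_SEL_RPL_LDT) + 1 == (uSel & X86_SEL_MASK) + 8, i.e. the
           offset of the high half of the 16-byte long mode descriptor. */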
11043 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11044 else
11045 {
11046 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11047 /** @todo is this the right exception? */
11048 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11049 }
11050 }
11051 return rcStrict;
11052}
11053
11054
11055/**
11056 * Fetches a descriptor table entry.
11057 *
11058 * @returns Strict VBox status code.
11059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11060 * @param pDesc Where to return the descriptor table entry.
11061 * @param uSel The selector which table entry to fetch.
11062 * @param uXcpt The exception to raise on table lookup error.
11063 */
11064IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11065{
11066 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11067}
11068
11069
11070/**
11071 * Fakes a long mode stack selector for SS = 0.
11072 *
11073 * @param pDescSs Where to return the fake stack descriptor.
11074 * @param uDpl The DPL we want.
11075 */
11076IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11077{
11078 pDescSs->Long.au64[0] = 0;
11079 pDescSs->Long.au64[1] = 0;
11080 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11081 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11082 pDescSs->Long.Gen.u2Dpl = uDpl;
11083 pDescSs->Long.Gen.u1Present = 1;
11084 pDescSs->Long.Gen.u1Long = 1;
11085}
11086
11087
11088/**
11089 * Marks the selector descriptor as accessed (only non-system descriptors).
11090 *
11091 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11092 * will therefore skip the limit checks.
11093 *
11094 * @returns Strict VBox status code.
11095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11096 * @param uSel The selector.
11097 */
11098IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11099{
11100 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11101
11102 /*
11103 * Get the selector table base and calculate the entry address.
11104 */
11105 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11106 ? pCtx->ldtr.u64Base
11107 : pCtx->gdtr.pGdt;
11108 GCPtr += uSel & X86_SEL_MASK;
11109
11110 /*
11111 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11112 * ugly stuff to avoid this. This also makes sure the access is atomic
11113 * and more or less removes any question about 8-bit vs 32-bit accesses.
11114 */
11115 VBOXSTRICTRC rcStrict;
11116 uint32_t volatile *pu32;
11117 if ((GCPtr & 3) == 0)
11118 {
11119 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
11120 GCPtr += 2 + 2;
11121 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11122 if (rcStrict != VINF_SUCCESS)
11123 return rcStrict;
11124 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11125 }
11126 else
11127 {
11128 /* The misaligned GDT/LDT case, map the whole thing. */
11129 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11130 if (rcStrict != VINF_SUCCESS)
11131 return rcStrict;
11132 switch ((uintptr_t)pu32 & 3)
11133 {
11134 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11135 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11136 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11137 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11138 }
11139 }
11140
11141 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11142}
11143
11144/** @} */
11145
11146
11147/*
11148 * Include the C/C++ implementation of the instructions.
11149 */
11150#include "IEMAllCImpl.cpp.h"
11151
11152
11153
11154/** @name "Microcode" macros.
11155 *
11156 * The idea is that we should be able to use the same code to interpret
11157 * instructions as well as to recompile them. Thus this obfuscation.
11158 *
11159 * @{
11160 */
11161#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11162#define IEM_MC_END() }
11163#define IEM_MC_PAUSE() do {} while (0)
11164#define IEM_MC_CONTINUE() do {} while (0)
11165
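/* Illustrative sketch (not an actual instruction body from this file): a typical
 * IEM_MC block in the instruction decoder sources strings these macros together
 * roughly like this:
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint32_t, u32Value);
 *     IEM_MC_FETCH_GREG_U32(u32Value, X86_GREG_xAX);
 *     IEM_MC_STORE_GREG_U32(X86_GREG_xDX, u32Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *
 * When interpreting, the macros expand to the inline C below; a recompiler could
 * expand the very same block into code generation calls instead. */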
11166/** Internal macro. */
11167#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11168 do \
11169 { \
11170 VBOXSTRICTRC rcStrict2 = a_Expr; \
11171 if (rcStrict2 != VINF_SUCCESS) \
11172 return rcStrict2; \
11173 } while (0)
11174
11175
11176#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11177#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11178#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11179#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11180#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11181#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11182#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11183#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11184#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11185 do { \
11186 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11187 return iemRaiseDeviceNotAvailable(pVCpu); \
11188 } while (0)
11189#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11190 do { \
11191 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11192 return iemRaiseDeviceNotAvailable(pVCpu); \
11193 } while (0)
11194#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11195 do { \
11196 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11197 return iemRaiseMathFault(pVCpu); \
11198 } while (0)
11199#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11200 do { \
11201 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11202 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11203 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11204 return iemRaiseUndefinedOpcode(pVCpu); \
11205 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11206 return iemRaiseDeviceNotAvailable(pVCpu); \
11207 } while (0)
11208#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11209 do { \
11210 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11211 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11212 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11213 return iemRaiseUndefinedOpcode(pVCpu); \
11214 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11215 return iemRaiseDeviceNotAvailable(pVCpu); \
11216 } while (0)
11217#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11218 do { \
11219 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11220 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11221 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11222 return iemRaiseUndefinedOpcode(pVCpu); \
11223 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11224 return iemRaiseDeviceNotAvailable(pVCpu); \
11225 } while (0)
11226#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11227 do { \
11228 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11229 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11230 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11231 return iemRaiseUndefinedOpcode(pVCpu); \
11232 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11233 return iemRaiseDeviceNotAvailable(pVCpu); \
11234 } while (0)
11235#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11236 do { \
11237 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11238 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11239 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11240 return iemRaiseUndefinedOpcode(pVCpu); \
11241 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11242 return iemRaiseDeviceNotAvailable(pVCpu); \
11243 } while (0)
11244#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11245 do { \
11246 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11247 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11248 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11249 return iemRaiseUndefinedOpcode(pVCpu); \
11250 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11251 return iemRaiseDeviceNotAvailable(pVCpu); \
11252 } while (0)
11253#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11254 do { \
11255 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11256 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11257 return iemRaiseUndefinedOpcode(pVCpu); \
11258 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11259 return iemRaiseDeviceNotAvailable(pVCpu); \
11260 } while (0)
11261#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11262 do { \
11263 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11264 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11265 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11266 return iemRaiseUndefinedOpcode(pVCpu); \
11267 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11268 return iemRaiseDeviceNotAvailable(pVCpu); \
11269 } while (0)
11270#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11271 do { \
11272 if (pVCpu->iem.s.uCpl != 0) \
11273 return iemRaiseGeneralProtectionFault0(pVCpu); \
11274 } while (0)
11275#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11276 do { \
11277 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11278 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11279 } while (0)
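/* Note: the alignment test above assumes a_cbAlign is a power of two; only then is
   (addr & (align - 1)) == 0 a valid alignment check. */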
11280#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11281 do { \
11282 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11283 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11284 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_FSGSBASE)) \
11285 return iemRaiseUndefinedOpcode(pVCpu); \
11286 } while (0)
11287#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11288 do { \
11289 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11290 return iemRaiseGeneralProtectionFault0(pVCpu); \
11291 } while (0)
11292
11293
11294#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11295#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11296#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11297#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11298#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11299#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11300#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11301 uint32_t a_Name; \
11302 uint32_t *a_pName = &a_Name
11303#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11304 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
11305
11306#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11307#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11308
11309#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11310#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11311#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11312#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11313#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11314#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11315#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11316#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11317#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11318#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11319#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11320#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11321#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11322#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11323#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11324#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11325#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11326#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11327#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11328#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11329#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg));
11330#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg));
11331#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11332#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11333#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11334#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11335#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11336#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11337#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11338#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11339#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11340/** @note Not for IOPL or IF testing or modification. */
11341#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11342#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11343#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
11344#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
11345
11346#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11347#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11348#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11349#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11350#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11351#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11352#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11353#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11354#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11355#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11356#define IEM_MC_STORE_SREG_BASE_U64(a_iSeg, a_u64Value) *iemSRegBaseRefU64(pVCpu, (a_iSeg)) = (a_u64Value)
11357#define IEM_MC_STORE_SREG_BASE_U32(a_iSeg, a_u32Value) *iemSRegBaseRefU64(pVCpu, (a_iSeg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11358#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11359 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11360
11361
11362#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11363#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11364/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11365 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11366#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11367#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11368/** @note Not for IOPL or IF testing or modification. */
11369#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
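/* Usage sketch for the IEM_MC_REF_GREG_U32 @todo above (illustrative only): a
 * 32-bit read-modify-write operand is referenced and then explicitly zero-extended
 * on commit, e.g.:
 *
 *     uint32_t *pu32Dst;
 *     IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xCX);
 *     ... operate on *pu32Dst ...
 *     IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
 */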
11370
11371#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11372#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11373#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11374 do { \
11375 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11376 *pu32Reg += (a_u32Value); \
11377 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11378 } while (0)
11379#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11380
11381#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11382#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11383#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11384 do { \
11385 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11386 *pu32Reg -= (a_u32Value); \
11387 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11388 } while (0)
11389#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11390#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11391
11392#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11393#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11394#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11395#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11396#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11397#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11398#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11399
11400#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11401#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11402#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11403#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11404
11405#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11406#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11407#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11408
11409#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11410#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11411#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11412
11413#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11414#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11415#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11416
11417#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11418#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11419#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11420
11421#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11422
11423#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11424
11425#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11426#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11427#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11428 do { \
11429 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11430 *pu32Reg &= (a_u32Value); \
11431 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11432 } while (0)
11433#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11434
11435#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11436#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11437#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11438 do { \
11439 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11440 *pu32Reg |= (a_u32Value); \
11441 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11442 } while (0)
11443#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11444
11445
11446/** @note Not for IOPL or IF modification. */
11447#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
11448/** @note Not for IOPL or IF modification. */
11449#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
11450/** @note Not for IOPL or IF modification. */
11451#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
11452
11453#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11454
11455/** Switches the FPU state to MMX mode (FSW.TOS=0, all tags valid, i.e. FTW=0xff). */
11456#define IEM_MC_FPU_TO_MMX_MODE() do { \
11457 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11458 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0xff; \
11459 } while (0)
11460
11461/** Switches the FPU state back from MMX mode, marking all registers empty (FTW=0). */
11462#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11463 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0; \
11464 } while (0)
11465
11466#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11467 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11468#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11469 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11470#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11471 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11472 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11473 } while (0)
11474#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11475 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11476 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11477 } while (0)
11478#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11479 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11480#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11481 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11482#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11483 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11484
11485#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11486 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11487 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11488 } while (0)
11489#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11490 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11491#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11492 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11493#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11494 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11495#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11496 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11497 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11498 } while (0)
11499#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11500 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11501#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11502 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11503 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11504 } while (0)
11505#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11506 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11507#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11508 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11509 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11510 } while (0)
11511#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11512 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11513#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11514 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11515#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11516 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11517#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11518 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11519#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11520 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11521 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11522 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11523 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11524 } while (0)
11525
11526#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11527 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11528 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11529 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11530 } while (0)
11531#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11532 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11533 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11534 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11535 } while (0)
11536#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11537 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11538 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11539 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11540 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11541 } while (0)
11542#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11543 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11544 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11545 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11546 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11547 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11548 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11549 } while (0)
11550
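/* The *_ZX_VLMAX store/copy/merge operations below implement the VEX rule that a
 * write to an XMM/YMM register zeroes the rest of the full vector register up to
 * VLMAX: bits 128 thru 255 are cleared explicitly, while IEM_MC_INT_CLEAR_ZMM_256_UP
 * is a placeholder for clearing bits 256 and up. */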
11551#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11552#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11553 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11554 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11555 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11556 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11557 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11558 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11559 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11560 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11561 } while (0)
11562#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11563 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11564 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11565 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11566 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11567 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11568 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11569 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11570 } while (0)
11571#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11572 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11573 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11574 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11575 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11576 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11577 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11578 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11579 } while (0)
11580#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11581 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11582 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11583 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11584 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11585 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11586 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11587 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11588 } while (0)
11589
11590#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11591 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11592#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11593 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11594#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11595 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11596#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11597 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11598 uintptr_t const iYRegTmp = (a_iYReg); \
11599 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11600 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11601 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11602 } while (0)
11603
11604#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11605 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11606 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11607 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11608 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11609 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11610 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11611 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11612 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11613 } while (0)
11614#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11615 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11616 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11617 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11618 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11619 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11620 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11621 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11622 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11623 } while (0)
11624#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11625 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11626 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11627 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11628 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11629 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11630 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11631 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11632 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11633 } while (0)
11634
11635#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11636 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11637 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11638 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11639 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11640 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11641 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11642 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11643 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11644 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11645 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11646 } while (0)
11647#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11648 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11649 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11650 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11651 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11652 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11653 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11654 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11655 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11656 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11657 } while (0)
11658#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11659 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11660 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11661 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11662 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11663 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11664 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11665 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11666 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11667 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11668 } while (0)
11669#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11670 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11671 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11672 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11673 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11674 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11675 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11676 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11677 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11678 } while (0)
11679
11680#ifndef IEM_WITH_SETJMP
11681# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11682 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11683# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11684 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11685# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11686 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11687#else
11688# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11689 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11690# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11691 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11692# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11693 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11694#endif
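/* The pattern above repeats for all the memory accessors: without IEM_WITH_SETJMP
 * the iemMem*() workers return a strict status code that IEM_MC_RETURN_ON_FAILURE
 * propagates, whereas the *Jmp variants report failures by longjmp'ing out of the
 * instruction, so the macros can expand to plain assignments. */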
11695
11696#ifndef IEM_WITH_SETJMP
11697# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11698 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11699# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11700 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11701# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11702 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11703#else
11704# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11705 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11706# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11707 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11708# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11709 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11710#endif
11711
11712#ifndef IEM_WITH_SETJMP
11713# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11714 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11715# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11716 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11717# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11718 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11719#else
11720# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11721 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11722# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11723 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11724# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11725 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11726#endif
11727
11728#ifdef SOME_UNUSED_FUNCTION
11729# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11730 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11731#endif
11732
11733#ifndef IEM_WITH_SETJMP
11734# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11735 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11736# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11737 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11738# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11739 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11740# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11741 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11742#else
11743# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11744 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11745# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11746 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11747# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11748 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11749# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11750 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11751#endif
11752
11753#ifndef IEM_WITH_SETJMP
11754# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11755 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11756# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11757 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11758# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11759 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11760#else
11761# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11762 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11763# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11764 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11765# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11766 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11767#endif
11768
11769#ifndef IEM_WITH_SETJMP
11770# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11771 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11772# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11773 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11774#else
11775# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11776 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11777# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11778 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11779#endif
11780
11781#ifndef IEM_WITH_SETJMP
11782# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11783 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11784# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11785 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11786#else
11787# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11788 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11789# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11790 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11791#endif
11792
11793
11794
11795#ifndef IEM_WITH_SETJMP
11796# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11797 do { \
11798 uint8_t u8Tmp; \
11799 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11800 (a_u16Dst) = u8Tmp; \
11801 } while (0)
11802# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11803 do { \
11804 uint8_t u8Tmp; \
11805 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11806 (a_u32Dst) = u8Tmp; \
11807 } while (0)
11808# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11809 do { \
11810 uint8_t u8Tmp; \
11811 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11812 (a_u64Dst) = u8Tmp; \
11813 } while (0)
11814# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11815 do { \
11816 uint16_t u16Tmp; \
11817 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11818 (a_u32Dst) = u16Tmp; \
11819 } while (0)
11820# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11821 do { \
11822 uint16_t u16Tmp; \
11823 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11824 (a_u64Dst) = u16Tmp; \
11825 } while (0)
11826# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11827 do { \
11828 uint32_t u32Tmp; \
11829 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11830 (a_u64Dst) = u32Tmp; \
11831 } while (0)
11832#else /* IEM_WITH_SETJMP */
11833# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11834 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11835# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11836 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11837# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11838 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11839# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11840 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11841# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11842 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11843# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11844 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11845#endif /* IEM_WITH_SETJMP */
11846
11847#ifndef IEM_WITH_SETJMP
11848# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11849 do { \
11850 uint8_t u8Tmp; \
11851 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11852 (a_u16Dst) = (int8_t)u8Tmp; \
11853 } while (0)
11854# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11855 do { \
11856 uint8_t u8Tmp; \
11857 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11858 (a_u32Dst) = (int8_t)u8Tmp; \
11859 } while (0)
11860# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11861 do { \
11862 uint8_t u8Tmp; \
11863 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11864 (a_u64Dst) = (int8_t)u8Tmp; \
11865 } while (0)
11866# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11867 do { \
11868 uint16_t u16Tmp; \
11869 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11870 (a_u32Dst) = (int16_t)u16Tmp; \
11871 } while (0)
11872# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11873 do { \
11874 uint16_t u16Tmp; \
11875 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11876 (a_u64Dst) = (int16_t)u16Tmp; \
11877 } while (0)
11878# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11879 do { \
11880 uint32_t u32Tmp; \
11881 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11882 (a_u64Dst) = (int32_t)u32Tmp; \
11883 } while (0)
11884#else /* IEM_WITH_SETJMP */
11885# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11886 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11887# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11888 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11889# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11890 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11891# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11892 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11893# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11894 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11895# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11896 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11897#endif /* IEM_WITH_SETJMP */
11898
11899#ifndef IEM_WITH_SETJMP
11900# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11901 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11902# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11903 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11904# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11905 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11906# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11907 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11908#else
11909# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11910 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11911# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11912 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11913# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11914 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11915# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11916 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11917#endif
11918
11919#ifndef IEM_WITH_SETJMP
11920# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11921 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11922# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11923 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11924# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11925 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11926# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11927 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11928#else
11929# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11930 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11931# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11932 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11933# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11934 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11935# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11936 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11937#endif
11938
11939#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11940#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11941#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11942#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11943#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11944#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11945#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11946 do { \
11947 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11948 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11949 } while (0)
11950
11951#ifndef IEM_WITH_SETJMP
11952# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11953 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11954# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11955 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11956#else
11957# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11958 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11959# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11960 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11961#endif
11962
11963#ifndef IEM_WITH_SETJMP
11964# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11965 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11966# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11967 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11968#else
11969# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11970 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11971# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11972 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11973#endif
11974
11975
11976#define IEM_MC_PUSH_U16(a_u16Value) \
11977 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11978#define IEM_MC_PUSH_U32(a_u32Value) \
11979 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11980#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11981 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11982#define IEM_MC_PUSH_U64(a_u64Value) \
11983 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11984
11985#define IEM_MC_POP_U16(a_pu16Value) \
11986 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11987#define IEM_MC_POP_U32(a_pu32Value) \
11988 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11989#define IEM_MC_POP_U64(a_pu64Value) \
11990 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11991
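/* Illustrative only: a PUSH-r16 style instruction body built from the stack
 * helpers above.  IEM_MC_BEGIN, IEM_MC_LOCAL, IEM_MC_FETCH_GREG_U16,
 * IEM_MC_ADVANCE_RIP and IEM_MC_END are the microcode macros defined earlier
 * in this file; iReg is a placeholder for the decoded register index.
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, iReg);
 *      IEM_MC_PUSH_U16(u16Value);      // may return on a guest memory fault
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */
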
11992/** Maps guest memory for direct or bounce buffered access.
11993 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11994 * @remarks May return.
11995 */
11996#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11997 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11998
11999/** Maps guest memory for direct or bounce buffered access.
12000 * The purpose is to pass it to an operand implementation, thus the a_iArg.
12001 * @remarks May return.
12002 */
12003#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
12004 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12005
12006/** Commits the memory and unmaps the guest memory.
12007 * @remarks May return.
12008 */
12009#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
12010 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
12011
12012/** Commits the memory and unmaps the guest memory unless the FPU status word
12013 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
12014 * that would prevent the instruction from storing.
12015 *
12016 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
12017 * store, while \#P will not.
12018 *
12019 * @remarks May in theory return - for now.
12020 */
12021#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
12022 do { \
12023 if ( !(a_u16FSW & X86_FSW_ES) \
12024 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
12025 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
12026 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
12027 } while (0)
12028
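/* A rough sketch of the usual map / modify / commit sequence for a
 * read-modify-write memory operand.  The local names (pu16Dst, u16Src,
 * pEFlags, GCPtrEffDst) and the worker pointer pfnAImpl are illustrative;
 * IEM_ACCESS_DATA_RW is the normal read-write access flag.
 *
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_CALL_VOID_AIMPL_3(pfnAImpl, pu16Dst, u16Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 */
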
12029/** Calculate efficient address from R/M. */
12030#ifndef IEM_WITH_SETJMP
12031# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12032 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12033#else
12034# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12035 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12036#endif
12037
12038#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12039#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12040#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12041#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12042#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12043#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12044#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
12045
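/* Sketch of a register-form binary operation using the AIMPL call macros just
 * above.  bRm is the ModR/M byte fetched by the decoder; pfnWorker stands in
 * for the real assembly helper; IEM_MC_ARG, IEM_MC_REF_GREG_U16,
 * IEM_MC_FETCH_GREG_U16 and IEM_MC_REF_EFLAGS come from earlier in this file.
 *
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 *      IEM_MC_ARG(uint16_t,   u16Src,  1);
 *      IEM_MC_ARG(uint32_t *, pEFlags, 2);
 *      IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_REF_EFLAGS(pEFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(pfnWorker, pu16Dst, u16Src, pEFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */
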
12046/**
12047 * Defers the rest of the instruction emulation to a C implementation routine
12048 * and returns, only taking the standard parameters.
12049 *
12050 * @param a_pfnCImpl The pointer to the C routine.
12051 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12052 */
12053#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12054
12055/**
12056 * Defers the rest of instruction emulation to a C implementation routine and
12057 * returns, taking one argument in addition to the standard ones.
12058 *
12059 * @param a_pfnCImpl The pointer to the C routine.
12060 * @param a0 The argument.
12061 */
12062#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12063
12064/**
12065 * Defers the rest of the instruction emulation to a C implementation routine
12066 * and returns, taking two arguments in addition to the standard ones.
12067 *
12068 * @param a_pfnCImpl The pointer to the C routine.
12069 * @param a0 The first extra argument.
12070 * @param a1 The second extra argument.
12071 */
12072#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12073
12074/**
12075 * Defers the rest of the instruction emulation to a C implementation routine
12076 * and returns, taking three arguments in addition to the standard ones.
12077 *
12078 * @param a_pfnCImpl The pointer to the C routine.
12079 * @param a0 The first extra argument.
12080 * @param a1 The second extra argument.
12081 * @param a2 The third extra argument.
12082 */
12083#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12084
12085/**
12086 * Defers the rest of the instruction emulation to a C implementation routine
12087 * and returns, taking four arguments in addition to the standard ones.
12088 *
12089 * @param a_pfnCImpl The pointer to the C routine.
12090 * @param a0 The first extra argument.
12091 * @param a1 The second extra argument.
12092 * @param a2 The third extra argument.
12093 * @param a3 The fourth extra argument.
12094 */
12095#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12096
12097/**
12098 * Defers the rest of the instruction emulation to a C implementation routine
12099 * and returns, taking five arguments in addition to the standard ones.
12100 *
12101 * @param a_pfnCImpl The pointer to the C routine.
12102 * @param a0 The first extra argument.
12103 * @param a1 The second extra argument.
12104 * @param a2 The third extra argument.
12105 * @param a3 The fourth extra argument.
12106 * @param a4 The fifth extra argument.
12107 */
12108#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12109
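/* Hedged example of deferring to a C worker after decoding an argument; the
 * worker name iemCImpl_SomeWorker is purely illustrative.  Note that the
 * IEM_MC_CALL_CIMPL_* macros return, so nothing after them executes.
 *
 *      IEM_MC_BEGIN(1, 0);
 *      IEM_MC_ARG(uint16_t, u16Port, 0);
 *      IEM_MC_FETCH_GREG_U16(u16Port, X86_GREG_xDX);
 *      IEM_MC_CALL_CIMPL_1(iemCImpl_SomeWorker, u16Port);
 *      IEM_MC_END();
 */
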
12110/**
12111 * Defers the entire instruction emulation to a C implementation routine and
12112 * returns, only taking the standard parameters.
12113 *
12114 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12115 *
12116 * @param a_pfnCImpl The pointer to the C routine.
12117 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12118 */
12119#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12120
12121/**
12122 * Defers the entire instruction emulation to a C implementation routine and
12123 * returns, taking one argument in addition to the standard ones.
12124 *
12125 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12126 *
12127 * @param a_pfnCImpl The pointer to the C routine.
12128 * @param a0 The argument.
12129 */
12130#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12131
12132/**
12133 * Defers the entire instruction emulation to a C implementation routine and
12134 * returns, taking two arguments in addition to the standard ones.
12135 *
12136 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12137 *
12138 * @param a_pfnCImpl The pointer to the C routine.
12139 * @param a0 The first extra argument.
12140 * @param a1 The second extra argument.
12141 */
12142#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12143
12144/**
12145 * Defers the entire instruction emulation to a C implementation routine and
12146 * returns, taking three arguments in addition to the standard ones.
12147 *
12148 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12149 *
12150 * @param a_pfnCImpl The pointer to the C routine.
12151 * @param a0 The first extra argument.
12152 * @param a1 The second extra argument.
12153 * @param a2 The third extra argument.
12154 */
12155#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12156
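/* Typical shape of a decoder with no operands that hands the entire
 * instruction to a C worker.  The mnemonic, stats member and worker name are
 * placeholders; IEMOP_MNEMONIC and IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX are
 * defined further down in this file.
 *
 *      IEMOP_MNEMONIC(xyz, "xyz");
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_xyz);
 */
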
12157/**
12158 * Calls a FPU assembly implementation taking one visible argument.
12159 *
12160 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12161 * @param a0 The first extra argument.
12162 */
12163#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12164 do { \
12165 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
12166 } while (0)
12167
12168/**
12169 * Calls a FPU assembly implementation taking two visible arguments.
12170 *
12171 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12172 * @param a0 The first extra argument.
12173 * @param a1 The second extra argument.
12174 */
12175#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12176 do { \
12177 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12178 } while (0)
12179
12180/**
12181 * Calls a FPU assembly implementation taking three visible arguments.
12182 *
12183 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12184 * @param a0 The first extra argument.
12185 * @param a1 The second extra argument.
12186 * @param a2 The third extra argument.
12187 */
12188#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12189 do { \
12190 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12191 } while (0)
12192
12193#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12194 do { \
12195 (a_FpuData).FSW = (a_FSW); \
12196 (a_FpuData).r80Result = *(a_pr80Value); \
12197 } while (0)
12198
12199/** Pushes FPU result onto the stack. */
12200#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12201 iemFpuPushResult(pVCpu, &a_FpuData)
12202/** Pushes FPU result onto the stack and sets the FPUDP. */
12203#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12204 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12205
12206/** Replaces ST0 with value one and pushes value two onto the FPU stack. */
12207#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12208 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12209
12210/** Stores FPU result in a stack register. */
12211#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12212 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12213/** Stores FPU result in a stack register and pops the stack. */
12214#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12215 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12216/** Stores FPU result in a stack register and sets the FPUDP. */
12217#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12218 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12219/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12220 * stack. */
12221#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12222 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12223
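/* Illustrative FLD-style flow using the result helpers above.  The worker
 * name pfnWorker and the locals are assumptions; IEM_MC_PREPARE_FPU_USAGE and
 * the IEM_MC_IF_FPUREG_IS_EMPTY / IEM_MC_ELSE / IEM_MC_ENDIF condition macros
 * are defined further down in this file.
 *
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_FPUREG_IS_EMPTY(7) {
 *          IEM_MC_CALL_FPU_AIMPL_2(pfnWorker, pFpuRes, pr80Src);
 *          IEM_MC_PUSH_FPU_RESULT(FpuRes);
 *      } IEM_MC_ELSE() {
 *          IEM_MC_FPU_STACK_PUSH_OVERFLOW();
 *      } IEM_MC_ENDIF();
 */
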
12224/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12225#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12226 iemFpuUpdateOpcodeAndIp(pVCpu)
12227/** Free a stack register (for FFREE and FFREEP). */
12228#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12229 iemFpuStackFree(pVCpu, a_iStReg)
12230/** Increment the FPU stack pointer. */
12231#define IEM_MC_FPU_STACK_INC_TOP() \
12232 iemFpuStackIncTop(pVCpu)
12233/** Decrement the FPU stack pointer. */
12234#define IEM_MC_FPU_STACK_DEC_TOP() \
12235 iemFpuStackDecTop(pVCpu)
12236
12237/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12238#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12239 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12240/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12241#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12242 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12243/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12244#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12245 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12246/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12247#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12248 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12249/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12250 * stack. */
12251#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12252 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12253/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12254#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12255 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12256
12257/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12258#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12259 iemFpuStackUnderflow(pVCpu, a_iStDst)
12260/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12261 * stack. */
12262#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12263 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12264/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12265 * FPUDS. */
12266#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12267 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12268/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12269 * FPUDS. Pops stack. */
12270#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12271 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12272/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12273 * stack twice. */
12274#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12275 iemFpuStackUnderflowThenPopPop(pVCpu)
12276/** Raises a FPU stack underflow exception for an instruction pushing a result
12277 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12278#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12279 iemFpuStackPushUnderflow(pVCpu)
12280/** Raises a FPU stack underflow exception for an instruction pushing a result
12281 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12282#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12283 iemFpuStackPushUnderflowTwo(pVCpu)
12284
12285/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12286 * FPUIP, FPUCS and FOP. */
12287#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12288 iemFpuStackPushOverflow(pVCpu)
12289/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12290 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12291#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12292 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12293/** Prepares for using the FPU state.
12294 * Ensures that we can use the host FPU in the current context (RC+R0).
12295 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12296#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12297/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
12298#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12299/** Actualizes the guest FPU state so it can be accessed and modified. */
12300#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12301
12302/** Prepares for using the SSE state.
12303 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12304 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12305#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12306/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12307#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12308/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12309#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12310
12311/** Prepares for using the AVX state.
12312 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12313 * Ensures the guest AVX state in the CPUMCTX is up to date.
12314 * @note This will include the AVX512 state too when support for it is added
12315 * due to the zero extending feature of VEX instructions. */
12316#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12317/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12318#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12319/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12320#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12321
12322/**
12323 * Calls a MMX assembly implementation taking two visible arguments.
12324 *
12325 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12326 * @param a0 The first extra argument.
12327 * @param a1 The second extra argument.
12328 */
12329#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12330 do { \
12331 IEM_MC_PREPARE_FPU_USAGE(); \
12332 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12333 } while (0)
12334
12335/**
12336 * Calls a MMX assembly implementation taking three visible arguments.
12337 *
12338 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12339 * @param a0 The first extra argument.
12340 * @param a1 The second extra argument.
12341 * @param a2 The third extra argument.
12342 */
12343#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12344 do { \
12345 IEM_MC_PREPARE_FPU_USAGE(); \
12346 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12347 } while (0)
12348
12349
12350/**
12351 * Calls a SSE assembly implementation taking two visible arguments.
12352 *
12353 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12354 * @param a0 The first extra argument.
12355 * @param a1 The second extra argument.
12356 */
12357#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12358 do { \
12359 IEM_MC_PREPARE_SSE_USAGE(); \
12360 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12361 } while (0)
12362
12363/**
12364 * Calls a SSE assembly implementation taking three visible arguments.
12365 *
12366 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12367 * @param a0 The first extra argument.
12368 * @param a1 The second extra argument.
12369 * @param a2 The third extra argument.
12370 */
12371#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12372 do { \
12373 IEM_MC_PREPARE_SSE_USAGE(); \
12374 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12375 } while (0)
12376
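/* Hedged sketch of a two register SSE instruction body; pfnWorker is an
 * illustrative worker pointer, bRm the ModR/M byte, and the IEM_MC_ARG,
 * IEM_MC_REF_XREG_U128* and SSE exception-check macros are assumed from
 * earlier in this file (the call macro above already prepares SSE usage).
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(PRTUINT128U,  pDst, 0);
 *      IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
 *      IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
 *      IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_CALL_SSE_AIMPL_2(pfnWorker, pDst, pSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */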
12377
12378/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12379 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12380#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12381 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState), 0)
12382
12383/**
12384 * Calls a AVX assembly implementation taking two visible arguments.
12385 *
12386 * There is one implicit zero'th argument, a pointer to the extended state.
12387 *
12388 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12389 * @param a1 The first extra argument.
12390 * @param a2 The second extra argument.
12391 */
12392#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12393 do { \
12394 IEM_MC_PREPARE_AVX_USAGE(); \
12395 a_pfnAImpl(pXState, (a1), (a2)); \
12396 } while (0)
12397
12398/**
12399 * Calls a AVX assembly implementation taking three visible arguments.
12400 *
12401 * There is one implicit zero'th argument, a pointer to the extended state.
12402 *
12403 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12404 * @param a1 The first extra argument.
12405 * @param a2 The second extra argument.
12406 * @param a3 The third extra argument.
12407 */
12408#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12409 do { \
12410 IEM_MC_PREPARE_AVX_USAGE(); \
12411 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12412 } while (0)
12413
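/* Illustrative use of the implicit AVX argument declaration together with the
 * call macro.  pfnWorker and the register-index expressions are placeholders,
 * and a real instruction body would also add the appropriate AVX exception
 * checks before calling the worker.
 *
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *      IEM_MC_ARG_CONST(uint8_t, iYRegDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, 1);
 *      IEM_MC_ARG_CONST(uint8_t, iYRegSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 2);
 *      IEM_MC_CALL_AVX_AIMPL_2(pfnWorker, iYRegDst, iYRegSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */
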
12414/** @note Not for IOPL or IF testing. */
12415#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
12416/** @note Not for IOPL or IF testing. */
12417#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
12418/** @note Not for IOPL or IF testing. */
12419#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
12420/** @note Not for IOPL or IF testing. */
12421#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
12422/** @note Not for IOPL or IF testing. */
12423#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12424 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12425 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12426/** @note Not for IOPL or IF testing. */
12427#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12428 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12429 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12430/** @note Not for IOPL or IF testing. */
12431#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12432 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12433 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12434 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12435/** @note Not for IOPL or IF testing. */
12436#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12437 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12438 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12439 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12440#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
12441#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
12442#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
12443/** @note Not for IOPL or IF testing. */
12444#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12445 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12446 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12447/** @note Not for IOPL or IF testing. */
12448#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12449 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12450 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12451/** @note Not for IOPL or IF testing. */
12452#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12453 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12454 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12455/** @note Not for IOPL or IF testing. */
12456#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12457 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12458 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12459/** @note Not for IOPL or IF testing. */
12460#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12461 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12462 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12463/** @note Not for IOPL or IF testing. */
12464#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12465 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12466 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12467#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12468#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12469
12470#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12471 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12472#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12473 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12474#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12475 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12476#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12477 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12478#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12479 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12480#define IEM_MC_IF_FCW_IM() \
12481 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12482
12483#define IEM_MC_ELSE() } else {
12484#define IEM_MC_ENDIF() } do {} while (0)
12485
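/* How the pseudo if/else reads in practice, e.g. a JZ-style conditional
 * relative jump (IEM_MC_REL_JMP_S8 and IEM_MC_ADVANCE_RIP are defined earlier
 * in this file; i8Imm is the decoded displacement):
 *
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      } IEM_MC_ELSE() {
 *          IEM_MC_ADVANCE_RIP();
 *      } IEM_MC_ENDIF();
 *      IEM_MC_END();
 */
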
12486/** @} */
12487
12488
12489/** @name Opcode Debug Helpers.
12490 * @{
12491 */
12492#ifdef VBOX_WITH_STATISTICS
12493# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12494#else
12495# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12496#endif
12497
12498#ifdef DEBUG
12499# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12500 do { \
12501 IEMOP_INC_STATS(a_Stats); \
12502 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
12503 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12504 } while (0)
12505
12506# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12507 do { \
12508 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12509 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12510 (void)RT_CONCAT(OP_,a_Upper); \
12511 (void)(a_fDisHints); \
12512 (void)(a_fIemHints); \
12513 } while (0)
12514
12515# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12516 do { \
12517 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12518 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12519 (void)RT_CONCAT(OP_,a_Upper); \
12520 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12521 (void)(a_fDisHints); \
12522 (void)(a_fIemHints); \
12523 } while (0)
12524
12525# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12526 do { \
12527 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12528 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12529 (void)RT_CONCAT(OP_,a_Upper); \
12530 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12531 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12532 (void)(a_fDisHints); \
12533 (void)(a_fIemHints); \
12534 } while (0)
12535
12536# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12537 do { \
12538 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12539 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12540 (void)RT_CONCAT(OP_,a_Upper); \
12541 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12542 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12543 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12544 (void)(a_fDisHints); \
12545 (void)(a_fIemHints); \
12546 } while (0)
12547
12548# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12549 do { \
12550 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12551 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12552 (void)RT_CONCAT(OP_,a_Upper); \
12553 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12554 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12555 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12556 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12557 (void)(a_fDisHints); \
12558 (void)(a_fIemHints); \
12559 } while (0)
12560
12561#else
12562# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12563
12564# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12565 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12566# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12567 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12568# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12569 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12570# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12571 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12572# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12573 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12574
12575#endif
12576
12577#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12578 IEMOP_MNEMONIC0EX(a_Lower, \
12579 #a_Lower, \
12580 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12581#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12582 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12583 #a_Lower " " #a_Op1, \
12584 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12585#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12586 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12587 #a_Lower " " #a_Op1 "," #a_Op2, \
12588 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12589#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12590 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12591 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12592 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12593#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12594 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12595 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12596 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
12597
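/* Typical use at the top of an opcode decoder function; the form and operand
 * tokens below are examples and must match existing IEMOPFORM_, OP_ and
 * OP_PARM_ defines:
 *
 *      IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, DISOPTYPE_HARMLESS, 0);
 *
 * In debug builds this bumps the per-instruction statistics and emits the
 * Log4 decode line; the (void) RT_CONCAT expressions are compile-time checks
 * that the referenced tokens exist.
 */
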
12598/** @} */
12599
12600
12601/** @name Opcode Helpers.
12602 * @{
12603 */
12604
12605#ifdef IN_RING3
12606# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12607 do { \
12608 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12609 else \
12610 { \
12611 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12612 return IEMOP_RAISE_INVALID_OPCODE(); \
12613 } \
12614 } while (0)
12615#else
12616# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12617 do { \
12618 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12619 else return IEMOP_RAISE_INVALID_OPCODE(); \
12620 } while (0)
12621#endif
12622
12623/** The instruction requires a 186 or later. */
12624#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12625# define IEMOP_HLP_MIN_186() do { } while (0)
12626#else
12627# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12628#endif
12629
12630/** The instruction requires a 286 or later. */
12631#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12632# define IEMOP_HLP_MIN_286() do { } while (0)
12633#else
12634# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12635#endif
12636
12637/** The instruction requires a 386 or later. */
12638#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12639# define IEMOP_HLP_MIN_386() do { } while (0)
12640#else
12641# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12642#endif
12643
12644/** The instruction requires a 386 or later if the given expression is true. */
12645#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12646# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12647#else
12648# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12649#endif
12650
12651/** The instruction requires a 486 or later. */
12652#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12653# define IEMOP_HLP_MIN_486() do { } while (0)
12654#else
12655# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12656#endif
12657
12658/** The instruction requires a Pentium (586) or later. */
12659#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12660# define IEMOP_HLP_MIN_586() do { } while (0)
12661#else
12662# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12663#endif
12664
12665/** The instruction requires a PentiumPro (686) or later. */
12666#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12667# define IEMOP_HLP_MIN_686() do { } while (0)
12668#else
12669# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12670#endif
12671
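/* Example (illustrative): a decoder for an instruction introduced with the
 * 386 simply starts with the matching helper, so older target CPU
 * configurations get \#UD instead of the new behaviour:
 *
 *      IEMOP_HLP_MIN_386();
 */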
12672
12673/** The instruction raises an \#UD in real and V8086 mode. */
12674#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12675 do \
12676 { \
12677 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12678 else return IEMOP_RAISE_INVALID_OPCODE(); \
12679 } while (0)
12680
12681/** The instruction is not available in 64-bit mode; throws \#UD if we're in
12682 * 64-bit mode. */
12683#define IEMOP_HLP_NO_64BIT() \
12684 do \
12685 { \
12686 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12687 return IEMOP_RAISE_INVALID_OPCODE(); \
12688 } while (0)
12689
12690/** The instruction is only available in 64-bit mode; throws \#UD if we're not in
12691 * 64-bit mode. */
12692#define IEMOP_HLP_ONLY_64BIT() \
12693 do \
12694 { \
12695 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12696 return IEMOP_RAISE_INVALID_OPCODE(); \
12697 } while (0)
12698
12699/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12700#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12701 do \
12702 { \
12703 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12704 iemRecalEffOpSize64Default(pVCpu); \
12705 } while (0)
12706
12707/** The instruction has 64-bit operand size if 64-bit mode. */
12708#define IEMOP_HLP_64BIT_OP_SIZE() \
12709 do \
12710 { \
12711 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12712 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12713 } while (0)
12714
12715/** Only a REX prefix immediately preceding the first opcode byte takes
12716 * effect. This macro helps ensure that, and it logs bad guest code. */
12717#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12718 do \
12719 { \
12720 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12721 { \
12722 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
12723 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
12724 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12725 pVCpu->iem.s.uRexB = 0; \
12726 pVCpu->iem.s.uRexIndex = 0; \
12727 pVCpu->iem.s.uRexReg = 0; \
12728 iemRecalEffOpSize(pVCpu); \
12729 } \
12730 } while (0)
12731
12732/**
12733 * Done decoding.
12734 */
12735#define IEMOP_HLP_DONE_DECODING() \
12736 do \
12737 { \
12738 /*nothing for now, maybe later... */ \
12739 } while (0)
12740
12741/**
12742 * Done decoding, raise \#UD exception if lock prefix present.
12743 */
12744#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12745 do \
12746 { \
12747 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12748 { /* likely */ } \
12749 else \
12750 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12751 } while (0)
12752
12753
12754/**
12755 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12756 * repnz or size prefixes are present, or if in real or v8086 mode.
12757 */
12758#define IEMOP_HLP_DONE_VEX_DECODING() \
12759 do \
12760 { \
12761 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12762 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12763 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12764 { /* likely */ } \
12765 else \
12766 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12767 } while (0)
12768
12769/**
12770 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12771 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12772 */
12773#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12774 do \
12775 { \
12776 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12777 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12778 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12779 && pVCpu->iem.s.uVexLength == 0)) \
12780 { /* likely */ } \
12781 else \
12782 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12783 } while (0)
12784
12785
12786/**
12787 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12788 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12789 * register 0, or if in real or v8086 mode.
12790 */
12791#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12792 do \
12793 { \
12794 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12795 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12796 && !pVCpu->iem.s.uVex3rdReg \
12797 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12798 { /* likely */ } \
12799 else \
12800 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12801 } while (0)
12802
12803/**
12804 * Done decoding VEX, no V, L=0.
12805 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12806 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12807 */
12808#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12809 do \
12810 { \
12811 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12812 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12813 && pVCpu->iem.s.uVexLength == 0 \
12814 && pVCpu->iem.s.uVex3rdReg == 0 \
12815 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12816 { /* likely */ } \
12817 else \
12818 return IEMOP_RAISE_INVALID_OPCODE(); \
12819 } while (0)
12820
12821#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12822 do \
12823 { \
12824 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12825 { /* likely */ } \
12826 else \
12827 { \
12828 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12829 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12830 } \
12831 } while (0)
12832#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12833 do \
12834 { \
12835 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12836 { /* likely */ } \
12837 else \
12838 { \
12839 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12840 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12841 } \
12842 } while (0)
12843
12844/**
12845 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12846 * are present.
12847 */
12848#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12849 do \
12850 { \
12851 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12852 { /* likely */ } \
12853 else \
12854 return IEMOP_RAISE_INVALID_OPCODE(); \
12855 } while (0)
12856
12857
12858#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
12859/** Checks and handles the SVM nested-guest instruction intercept, updating
12860 * NRIP if needed. */
12861# define IEMOP_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12862 do \
12863 { \
12864 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12865 { \
12866 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
12867 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12868 } \
12869 } while (0)
12870
12871/** Checks and handles the SVM nested-guest CR read intercept. */
12872# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12873 do \
12874 { \
12875 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12876 { \
12877 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
12878 IEM_RETURN_SVM_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12879 } \
12880 } while (0)
12881
12882#else /* !VBOX_WITH_NESTED_HWVIRT_SVM */
12883# define IEMOP_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12884# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12885#endif /* !VBOX_WITH_NESTED_HWVIRT_SVM */
12886
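/* Hedged usage sketch: a decoder (or C worker) for an intercepted instruction
 * checks the nested-guest intercept before doing the work; the intercept,
 * exit code and worker name below are illustrative only.
 *
 *      IEMOP_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(pVCpu, SVM_CTRL_INTERCEPT_HLT, SVM_EXIT_HLT, 0, 0);
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomeWorker);
 */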
12887
12888/**
12889 * Calculates the effective address of a ModR/M memory operand.
12890 *
12891 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12892 *
12893 * @return Strict VBox status code.
12894 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12895 * @param bRm The ModRM byte.
12896 * @param cbImm The size of any immediate following the
12897 * effective address opcode bytes. Important for
12898 * RIP relative addressing.
12899 * @param pGCPtrEff Where to return the effective address.
12900 */
12901IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12902{
12903 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12904 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12905# define SET_SS_DEF() \
12906 do \
12907 { \
12908 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12909 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12910 } while (0)
12911
12912 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12913 {
12914/** @todo Check the effective address size crap! */
12915 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12916 {
12917 uint16_t u16EffAddr;
12918
12919 /* Handle the disp16 form with no registers first. */
12920 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12921 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12922 else
12923 {
12924 /* Get the displacement. */
12925 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12926 {
12927 case 0: u16EffAddr = 0; break;
12928 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12929 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12930 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12931 }
12932
12933 /* Add the base and index registers to the disp. */
12934 switch (bRm & X86_MODRM_RM_MASK)
12935 {
12936 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12937 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12938 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12939 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12940 case 4: u16EffAddr += pCtx->si; break;
12941 case 5: u16EffAddr += pCtx->di; break;
12942 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12943 case 7: u16EffAddr += pCtx->bx; break;
12944 }
12945 }
12946
12947 *pGCPtrEff = u16EffAddr;
12948 }
12949 else
12950 {
12951 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12952 uint32_t u32EffAddr;
12953
12954 /* Handle the disp32 form with no registers first. */
12955 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12956 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12957 else
12958 {
12959 /* Get the register (or SIB) value. */
12960 switch ((bRm & X86_MODRM_RM_MASK))
12961 {
12962 case 0: u32EffAddr = pCtx->eax; break;
12963 case 1: u32EffAddr = pCtx->ecx; break;
12964 case 2: u32EffAddr = pCtx->edx; break;
12965 case 3: u32EffAddr = pCtx->ebx; break;
12966 case 4: /* SIB */
12967 {
12968 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12969
12970 /* Get the index and scale it. */
12971 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12972 {
12973 case 0: u32EffAddr = pCtx->eax; break;
12974 case 1: u32EffAddr = pCtx->ecx; break;
12975 case 2: u32EffAddr = pCtx->edx; break;
12976 case 3: u32EffAddr = pCtx->ebx; break;
12977 case 4: u32EffAddr = 0; /*none */ break;
12978 case 5: u32EffAddr = pCtx->ebp; break;
12979 case 6: u32EffAddr = pCtx->esi; break;
12980 case 7: u32EffAddr = pCtx->edi; break;
12981 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12982 }
12983 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12984
12985 /* add base */
12986 switch (bSib & X86_SIB_BASE_MASK)
12987 {
12988 case 0: u32EffAddr += pCtx->eax; break;
12989 case 1: u32EffAddr += pCtx->ecx; break;
12990 case 2: u32EffAddr += pCtx->edx; break;
12991 case 3: u32EffAddr += pCtx->ebx; break;
12992 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12993 case 5:
12994 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12995 {
12996 u32EffAddr += pCtx->ebp;
12997 SET_SS_DEF();
12998 }
12999 else
13000 {
13001 uint32_t u32Disp;
13002 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13003 u32EffAddr += u32Disp;
13004 }
13005 break;
13006 case 6: u32EffAddr += pCtx->esi; break;
13007 case 7: u32EffAddr += pCtx->edi; break;
13008 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13009 }
13010 break;
13011 }
13012 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13013 case 6: u32EffAddr = pCtx->esi; break;
13014 case 7: u32EffAddr = pCtx->edi; break;
13015 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13016 }
13017
13018 /* Get and add the displacement. */
13019 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13020 {
13021 case 0:
13022 break;
13023 case 1:
13024 {
13025 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13026 u32EffAddr += i8Disp;
13027 break;
13028 }
13029 case 2:
13030 {
13031 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13032 u32EffAddr += u32Disp;
13033 break;
13034 }
13035 default:
13036 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13037 }
13038
13039 }
13040 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13041 *pGCPtrEff = u32EffAddr;
13042 else
13043 {
13044 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13045 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13046 }
13047 }
13048 }
13049 else
13050 {
13051 uint64_t u64EffAddr;
13052
13053 /* Handle the rip+disp32 form with no registers first. */
13054 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13055 {
13056 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13057 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13058 }
13059 else
13060 {
13061 /* Get the register (or SIB) value. */
13062 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13063 {
13064 case 0: u64EffAddr = pCtx->rax; break;
13065 case 1: u64EffAddr = pCtx->rcx; break;
13066 case 2: u64EffAddr = pCtx->rdx; break;
13067 case 3: u64EffAddr = pCtx->rbx; break;
13068 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13069 case 6: u64EffAddr = pCtx->rsi; break;
13070 case 7: u64EffAddr = pCtx->rdi; break;
13071 case 8: u64EffAddr = pCtx->r8; break;
13072 case 9: u64EffAddr = pCtx->r9; break;
13073 case 10: u64EffAddr = pCtx->r10; break;
13074 case 11: u64EffAddr = pCtx->r11; break;
13075 case 13: u64EffAddr = pCtx->r13; break;
13076 case 14: u64EffAddr = pCtx->r14; break;
13077 case 15: u64EffAddr = pCtx->r15; break;
13078 /* SIB */
13079 case 4:
13080 case 12:
13081 {
13082 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13083
13084 /* Get the index and scale it. */
13085 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13086 {
13087 case 0: u64EffAddr = pCtx->rax; break;
13088 case 1: u64EffAddr = pCtx->rcx; break;
13089 case 2: u64EffAddr = pCtx->rdx; break;
13090 case 3: u64EffAddr = pCtx->rbx; break;
13091 case 4: u64EffAddr = 0; /*none */ break;
13092 case 5: u64EffAddr = pCtx->rbp; break;
13093 case 6: u64EffAddr = pCtx->rsi; break;
13094 case 7: u64EffAddr = pCtx->rdi; break;
13095 case 8: u64EffAddr = pCtx->r8; break;
13096 case 9: u64EffAddr = pCtx->r9; break;
13097 case 10: u64EffAddr = pCtx->r10; break;
13098 case 11: u64EffAddr = pCtx->r11; break;
13099 case 12: u64EffAddr = pCtx->r12; break;
13100 case 13: u64EffAddr = pCtx->r13; break;
13101 case 14: u64EffAddr = pCtx->r14; break;
13102 case 15: u64EffAddr = pCtx->r15; break;
13103 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13104 }
13105 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13106
13107 /* add base */
13108 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13109 {
13110 case 0: u64EffAddr += pCtx->rax; break;
13111 case 1: u64EffAddr += pCtx->rcx; break;
13112 case 2: u64EffAddr += pCtx->rdx; break;
13113 case 3: u64EffAddr += pCtx->rbx; break;
13114 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13115 case 6: u64EffAddr += pCtx->rsi; break;
13116 case 7: u64EffAddr += pCtx->rdi; break;
13117 case 8: u64EffAddr += pCtx->r8; break;
13118 case 9: u64EffAddr += pCtx->r9; break;
13119 case 10: u64EffAddr += pCtx->r10; break;
13120 case 11: u64EffAddr += pCtx->r11; break;
13121 case 12: u64EffAddr += pCtx->r12; break;
13122 case 14: u64EffAddr += pCtx->r14; break;
13123 case 15: u64EffAddr += pCtx->r15; break;
13124 /* complicated encodings */
13125 case 5:
13126 case 13:
13127 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13128 {
13129 if (!pVCpu->iem.s.uRexB)
13130 {
13131 u64EffAddr += pCtx->rbp;
13132 SET_SS_DEF();
13133 }
13134 else
13135 u64EffAddr += pCtx->r13;
13136 }
13137 else
13138 {
13139 uint32_t u32Disp;
13140 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13141 u64EffAddr += (int32_t)u32Disp;
13142 }
13143 break;
13144 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13145 }
13146 break;
13147 }
13148 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13149 }
13150
13151 /* Get and add the displacement. */
13152 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13153 {
13154 case 0:
13155 break;
13156 case 1:
13157 {
13158 int8_t i8Disp;
13159 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13160 u64EffAddr += i8Disp;
13161 break;
13162 }
13163 case 2:
13164 {
13165 uint32_t u32Disp;
13166 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13167 u64EffAddr += (int32_t)u32Disp;
13168 break;
13169 }
13170 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13171 }
13172
13173 }
13174
13175 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13176 *pGCPtrEff = u64EffAddr;
13177 else
13178 {
13179 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13180 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13181 }
13182 }
13183
13184 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13185 return VINF_SUCCESS;
13186}
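
/* Worked example: with 16-bit addressing, bRm=0x46 decodes as mod=1, r/m=6,
 * i.e. [bp+disp8].  The code above fetches the sign-extended disp8, adds BP,
 * and SET_SS_DEF() switches the default segment to SS because BP is used. */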
13187
13188
13189/**
13190 * Calculates the effective address of a ModR/M memory operand.
13191 *
13192 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13193 *
13194 * @return Strict VBox status code.
13195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13196 * @param bRm The ModRM byte.
13197 * @param cbImm The size of any immediate following the
13198 * effective address opcode bytes. Important for
13199 * RIP relative addressing.
13200 * @param pGCPtrEff Where to return the effective address.
13201 * @param offRsp RSP displacement.
13202 */
13203IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13204{
13205 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13206 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13207# define SET_SS_DEF() \
13208 do \
13209 { \
13210 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13211 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13212 } while (0)
13213
13214 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13215 {
13216/** @todo Check the effective address size crap! */
13217 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13218 {
13219 uint16_t u16EffAddr;
13220
13221 /* Handle the disp16 form with no registers first. */
13222 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13223 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13224 else
13225 {
13226 /* Get the displacement. */
13227 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13228 {
13229 case 0: u16EffAddr = 0; break;
13230 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13231 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13232 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13233 }
13234
13235 /* Add the base and index registers to the disp. */
13236 switch (bRm & X86_MODRM_RM_MASK)
13237 {
13238 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13239 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13240 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13241 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13242 case 4: u16EffAddr += pCtx->si; break;
13243 case 5: u16EffAddr += pCtx->di; break;
13244 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13245 case 7: u16EffAddr += pCtx->bx; break;
13246 }
13247 }
13248
13249 *pGCPtrEff = u16EffAddr;
13250 }
13251 else
13252 {
13253 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13254 uint32_t u32EffAddr;
13255
13256 /* Handle the disp32 form with no registers first. */
13257 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13258 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13259 else
13260 {
13261 /* Get the register (or SIB) value. */
13262 switch ((bRm & X86_MODRM_RM_MASK))
13263 {
13264 case 0: u32EffAddr = pCtx->eax; break;
13265 case 1: u32EffAddr = pCtx->ecx; break;
13266 case 2: u32EffAddr = pCtx->edx; break;
13267 case 3: u32EffAddr = pCtx->ebx; break;
13268 case 4: /* SIB */
13269 {
13270 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13271
13272 /* Get the index and scale it. */
13273 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13274 {
13275 case 0: u32EffAddr = pCtx->eax; break;
13276 case 1: u32EffAddr = pCtx->ecx; break;
13277 case 2: u32EffAddr = pCtx->edx; break;
13278 case 3: u32EffAddr = pCtx->ebx; break;
13279 case 4: u32EffAddr = 0; /*none */ break;
13280 case 5: u32EffAddr = pCtx->ebp; break;
13281 case 6: u32EffAddr = pCtx->esi; break;
13282 case 7: u32EffAddr = pCtx->edi; break;
13283 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13284 }
13285 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13286
13287 /* add base */
13288 switch (bSib & X86_SIB_BASE_MASK)
13289 {
13290 case 0: u32EffAddr += pCtx->eax; break;
13291 case 1: u32EffAddr += pCtx->ecx; break;
13292 case 2: u32EffAddr += pCtx->edx; break;
13293 case 3: u32EffAddr += pCtx->ebx; break;
13294 case 4:
13295 u32EffAddr += pCtx->esp + offRsp;
13296 SET_SS_DEF();
13297 break;
13298 case 5:
13299 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13300 {
13301 u32EffAddr += pCtx->ebp;
13302 SET_SS_DEF();
13303 }
13304 else
13305 {
13306 uint32_t u32Disp;
13307 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13308 u32EffAddr += u32Disp;
13309 }
13310 break;
13311 case 6: u32EffAddr += pCtx->esi; break;
13312 case 7: u32EffAddr += pCtx->edi; break;
13313 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13314 }
13315 break;
13316 }
13317 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13318 case 6: u32EffAddr = pCtx->esi; break;
13319 case 7: u32EffAddr = pCtx->edi; break;
13320 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13321 }
13322
13323 /* Get and add the displacement. */
13324 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13325 {
13326 case 0:
13327 break;
13328 case 1:
13329 {
13330 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13331 u32EffAddr += i8Disp;
13332 break;
13333 }
13334 case 2:
13335 {
13336 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13337 u32EffAddr += u32Disp;
13338 break;
13339 }
13340 default:
13341 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13342 }
13343
13344 }
13345 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13346 *pGCPtrEff = u32EffAddr;
13347 else
13348 {
13349 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13350 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13351 }
13352 }
13353 }
13354 else
13355 {
13356 uint64_t u64EffAddr;
13357
13358 /* Handle the rip+disp32 form with no registers first. */
13359 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13360 {
13361 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13362 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13363 }
13364 else
13365 {
13366 /* Get the register (or SIB) value. */
13367 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13368 {
13369 case 0: u64EffAddr = pCtx->rax; break;
13370 case 1: u64EffAddr = pCtx->rcx; break;
13371 case 2: u64EffAddr = pCtx->rdx; break;
13372 case 3: u64EffAddr = pCtx->rbx; break;
13373 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13374 case 6: u64EffAddr = pCtx->rsi; break;
13375 case 7: u64EffAddr = pCtx->rdi; break;
13376 case 8: u64EffAddr = pCtx->r8; break;
13377 case 9: u64EffAddr = pCtx->r9; break;
13378 case 10: u64EffAddr = pCtx->r10; break;
13379 case 11: u64EffAddr = pCtx->r11; break;
13380 case 13: u64EffAddr = pCtx->r13; break;
13381 case 14: u64EffAddr = pCtx->r14; break;
13382 case 15: u64EffAddr = pCtx->r15; break;
13383 /* SIB */
13384 case 4:
13385 case 12:
13386 {
13387 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13388
13389 /* Get the index and scale it. */
13390 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13391 {
13392 case 0: u64EffAddr = pCtx->rax; break;
13393 case 1: u64EffAddr = pCtx->rcx; break;
13394 case 2: u64EffAddr = pCtx->rdx; break;
13395 case 3: u64EffAddr = pCtx->rbx; break;
13396 case 4: u64EffAddr = 0; /*none */ break;
13397 case 5: u64EffAddr = pCtx->rbp; break;
13398 case 6: u64EffAddr = pCtx->rsi; break;
13399 case 7: u64EffAddr = pCtx->rdi; break;
13400 case 8: u64EffAddr = pCtx->r8; break;
13401 case 9: u64EffAddr = pCtx->r9; break;
13402 case 10: u64EffAddr = pCtx->r10; break;
13403 case 11: u64EffAddr = pCtx->r11; break;
13404 case 12: u64EffAddr = pCtx->r12; break;
13405 case 13: u64EffAddr = pCtx->r13; break;
13406 case 14: u64EffAddr = pCtx->r14; break;
13407 case 15: u64EffAddr = pCtx->r15; break;
13408 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13409 }
13410 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13411
13412 /* add base */
13413 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13414 {
13415 case 0: u64EffAddr += pCtx->rax; break;
13416 case 1: u64EffAddr += pCtx->rcx; break;
13417 case 2: u64EffAddr += pCtx->rdx; break;
13418 case 3: u64EffAddr += pCtx->rbx; break;
13419 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
13420 case 6: u64EffAddr += pCtx->rsi; break;
13421 case 7: u64EffAddr += pCtx->rdi; break;
13422 case 8: u64EffAddr += pCtx->r8; break;
13423 case 9: u64EffAddr += pCtx->r9; break;
13424 case 10: u64EffAddr += pCtx->r10; break;
13425 case 11: u64EffAddr += pCtx->r11; break;
13426 case 12: u64EffAddr += pCtx->r12; break;
13427 case 14: u64EffAddr += pCtx->r14; break;
13428 case 15: u64EffAddr += pCtx->r15; break;
13429 /* complicated encodings */
13430 case 5:
13431 case 13:
13432 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13433 {
13434 if (!pVCpu->iem.s.uRexB)
13435 {
13436 u64EffAddr += pCtx->rbp;
13437 SET_SS_DEF();
13438 }
13439 else
13440 u64EffAddr += pCtx->r13;
13441 }
13442 else
13443 {
13444 uint32_t u32Disp;
13445 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13446 u64EffAddr += (int32_t)u32Disp;
13447 }
13448 break;
13449 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13450 }
13451 break;
13452 }
13453 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13454 }
13455
13456 /* Get and add the displacement. */
13457 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13458 {
13459 case 0:
13460 break;
13461 case 1:
13462 {
13463 int8_t i8Disp;
13464 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13465 u64EffAddr += i8Disp;
13466 break;
13467 }
13468 case 2:
13469 {
13470 uint32_t u32Disp;
13471 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13472 u64EffAddr += (int32_t)u32Disp;
13473 break;
13474 }
13475 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13476 }
13477
13478 }
13479
13480 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13481 *pGCPtrEff = u64EffAddr;
13482 else
13483 {
13484 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13485 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13486 }
13487 }
13488
13489 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13490 return VINF_SUCCESS;
13491}
13492
13493
13494#ifdef IEM_WITH_SETJMP
13495/**
13496 * Calculates the effective address of a ModR/M memory operand.
13497 *
13498 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13499 *
13500 * May longjmp on internal error.
13501 *
13502 * @return The effective address.
13503 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13504 * @param bRm The ModRM byte.
13505 * @param cbImm The size of any immediate following the
13506 * effective address opcode bytes. Important for
13507 * RIP relative addressing.
13508 */
13509IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13510{
13511 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13512 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
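/* Addressing via BP/EBP/RBP (and the stack pointer as SIB base) defaults to the SS segment
   unless an explicit segment prefix is present; SET_SS_DEF marks those cases below. */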
13513# define SET_SS_DEF() \
13514 do \
13515 { \
13516 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13517 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13518 } while (0)
13519
13520 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13521 {
13522/** @todo Check the effective address size crap! */
13523 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13524 {
13525 uint16_t u16EffAddr;
13526
13527 /* Handle the disp16 form with no registers first. */
13528 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13529 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13530 else
13531 {
13532            /* Get the displacement. */
13533 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13534 {
13535 case 0: u16EffAddr = 0; break;
13536 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13537 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13538 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13539 }
13540
13541 /* Add the base and index registers to the disp. */
13542 switch (bRm & X86_MODRM_RM_MASK)
13543 {
13544 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13545 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13546 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13547 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13548 case 4: u16EffAddr += pCtx->si; break;
13549 case 5: u16EffAddr += pCtx->di; break;
13550 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13551 case 7: u16EffAddr += pCtx->bx; break;
13552 }
13553 }
13554
13555 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13556 return u16EffAddr;
13557 }
13558
13559 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13560 uint32_t u32EffAddr;
13561
13562 /* Handle the disp32 form with no registers first. */
13563 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13564 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13565 else
13566 {
13567 /* Get the register (or SIB) value. */
13568 switch ((bRm & X86_MODRM_RM_MASK))
13569 {
13570 case 0: u32EffAddr = pCtx->eax; break;
13571 case 1: u32EffAddr = pCtx->ecx; break;
13572 case 2: u32EffAddr = pCtx->edx; break;
13573 case 3: u32EffAddr = pCtx->ebx; break;
13574 case 4: /* SIB */
13575 {
13576 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13577
13578 /* Get the index and scale it. */
13579 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13580 {
13581 case 0: u32EffAddr = pCtx->eax; break;
13582 case 1: u32EffAddr = pCtx->ecx; break;
13583 case 2: u32EffAddr = pCtx->edx; break;
13584 case 3: u32EffAddr = pCtx->ebx; break;
13585 case 4: u32EffAddr = 0; /*none */ break;
13586 case 5: u32EffAddr = pCtx->ebp; break;
13587 case 6: u32EffAddr = pCtx->esi; break;
13588 case 7: u32EffAddr = pCtx->edi; break;
13589 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13590 }
13591 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13592
13593 /* add base */
13594 switch (bSib & X86_SIB_BASE_MASK)
13595 {
13596 case 0: u32EffAddr += pCtx->eax; break;
13597 case 1: u32EffAddr += pCtx->ecx; break;
13598 case 2: u32EffAddr += pCtx->edx; break;
13599 case 3: u32EffAddr += pCtx->ebx; break;
13600 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13601 case 5:
13602 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13603 {
13604 u32EffAddr += pCtx->ebp;
13605 SET_SS_DEF();
13606 }
13607 else
13608 {
13609 uint32_t u32Disp;
13610 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13611 u32EffAddr += u32Disp;
13612 }
13613 break;
13614 case 6: u32EffAddr += pCtx->esi; break;
13615 case 7: u32EffAddr += pCtx->edi; break;
13616 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13617 }
13618 break;
13619 }
13620 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13621 case 6: u32EffAddr = pCtx->esi; break;
13622 case 7: u32EffAddr = pCtx->edi; break;
13623 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13624 }
13625
13626 /* Get and add the displacement. */
13627 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13628 {
13629 case 0:
13630 break;
13631 case 1:
13632 {
13633 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13634 u32EffAddr += i8Disp;
13635 break;
13636 }
13637 case 2:
13638 {
13639 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13640 u32EffAddr += u32Disp;
13641 break;
13642 }
13643 default:
13644 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13645 }
13646 }
13647
13648 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13649 {
13650 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13651 return u32EffAddr;
13652 }
13653 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13654 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13655 return u32EffAddr & UINT16_MAX;
13656 }
13657
13658 uint64_t u64EffAddr;
13659
13660 /* Handle the rip+disp32 form with no registers first. */
13661 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13662 {
13663 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13664 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13665 }
13666 else
13667 {
13668 /* Get the register (or SIB) value. */
13669 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13670 {
13671 case 0: u64EffAddr = pCtx->rax; break;
13672 case 1: u64EffAddr = pCtx->rcx; break;
13673 case 2: u64EffAddr = pCtx->rdx; break;
13674 case 3: u64EffAddr = pCtx->rbx; break;
13675 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13676 case 6: u64EffAddr = pCtx->rsi; break;
13677 case 7: u64EffAddr = pCtx->rdi; break;
13678 case 8: u64EffAddr = pCtx->r8; break;
13679 case 9: u64EffAddr = pCtx->r9; break;
13680 case 10: u64EffAddr = pCtx->r10; break;
13681 case 11: u64EffAddr = pCtx->r11; break;
13682 case 13: u64EffAddr = pCtx->r13; break;
13683 case 14: u64EffAddr = pCtx->r14; break;
13684 case 15: u64EffAddr = pCtx->r15; break;
13685 /* SIB */
13686 case 4:
13687 case 12:
13688 {
13689 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13690
13691 /* Get the index and scale it. */
13692 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13693 {
13694 case 0: u64EffAddr = pCtx->rax; break;
13695 case 1: u64EffAddr = pCtx->rcx; break;
13696 case 2: u64EffAddr = pCtx->rdx; break;
13697 case 3: u64EffAddr = pCtx->rbx; break;
13698 case 4: u64EffAddr = 0; /*none */ break;
13699 case 5: u64EffAddr = pCtx->rbp; break;
13700 case 6: u64EffAddr = pCtx->rsi; break;
13701 case 7: u64EffAddr = pCtx->rdi; break;
13702 case 8: u64EffAddr = pCtx->r8; break;
13703 case 9: u64EffAddr = pCtx->r9; break;
13704 case 10: u64EffAddr = pCtx->r10; break;
13705 case 11: u64EffAddr = pCtx->r11; break;
13706 case 12: u64EffAddr = pCtx->r12; break;
13707 case 13: u64EffAddr = pCtx->r13; break;
13708 case 14: u64EffAddr = pCtx->r14; break;
13709 case 15: u64EffAddr = pCtx->r15; break;
13710 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13711 }
13712 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13713
13714 /* add base */
13715 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13716 {
13717 case 0: u64EffAddr += pCtx->rax; break;
13718 case 1: u64EffAddr += pCtx->rcx; break;
13719 case 2: u64EffAddr += pCtx->rdx; break;
13720 case 3: u64EffAddr += pCtx->rbx; break;
13721 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13722 case 6: u64EffAddr += pCtx->rsi; break;
13723 case 7: u64EffAddr += pCtx->rdi; break;
13724 case 8: u64EffAddr += pCtx->r8; break;
13725 case 9: u64EffAddr += pCtx->r9; break;
13726 case 10: u64EffAddr += pCtx->r10; break;
13727 case 11: u64EffAddr += pCtx->r11; break;
13728 case 12: u64EffAddr += pCtx->r12; break;
13729 case 14: u64EffAddr += pCtx->r14; break;
13730 case 15: u64EffAddr += pCtx->r15; break;
13731 /* complicated encodings */
13732 case 5:
13733 case 13:
13734 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13735 {
13736 if (!pVCpu->iem.s.uRexB)
13737 {
13738 u64EffAddr += pCtx->rbp;
13739 SET_SS_DEF();
13740 }
13741 else
13742 u64EffAddr += pCtx->r13;
13743 }
13744 else
13745 {
13746 uint32_t u32Disp;
13747 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13748 u64EffAddr += (int32_t)u32Disp;
13749 }
13750 break;
13751 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13752 }
13753 break;
13754 }
13755 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13756 }
13757
13758 /* Get and add the displacement. */
13759 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13760 {
13761 case 0:
13762 break;
13763 case 1:
13764 {
13765 int8_t i8Disp;
13766 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13767 u64EffAddr += i8Disp;
13768 break;
13769 }
13770 case 2:
13771 {
13772 uint32_t u32Disp;
13773 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13774 u64EffAddr += (int32_t)u32Disp;
13775 break;
13776 }
13777 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13778 }
13779
13780 }
13781
13782 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13783 {
13784 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13785 return u64EffAddr;
13786 }
13787 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13788 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13789 return u64EffAddr & UINT32_MAX;
13790}
13791#endif /* IEM_WITH_SETJMP */
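/*
 * Illustrative sketch (not part of IEM, kept under #if 0): a stripped-down 16-bit
 * ModR/M effective address calculation mirroring the table the helpers above
 * implement.  Register values and the displacement are passed in directly instead
 * of coming from CPUMCTX and the opcode fetcher; the type and function names here
 * are hypothetical and only serve the illustration.
 */
#if 0
# include <stdint.h>

typedef struct EXAMPLEREGS16
{
    uint16_t bx, bp, si, di;
} EXAMPLEREGS16;

/* Returns the 16-bit effective address for the mod=0..2 memory forms (mod=3 is a register operand).
   The caller passes 0 for u16Disp when there is no displacement, or the sign-extended disp8/disp16. */
static uint16_t exampleCalc16BitEffAddr(uint8_t bRm, uint16_t u16Disp, EXAMPLEREGS16 const *pRegs)
{
    uint8_t const iMod = (bRm >> 6) & 3;
    uint8_t const iRm  = bRm & 7;

    /* mod=0, r/m=6 is the special disp16-only form. */
    if (iMod == 0 && iRm == 6)
        return u16Disp;

    uint16_t uEff;
    switch (iRm)
    {
        case 0:  uEff = pRegs->bx + pRegs->si; break;
        case 1:  uEff = pRegs->bx + pRegs->di; break;
        case 2:  uEff = pRegs->bp + pRegs->si; break;  /* SS is the default segment here */
        case 3:  uEff = pRegs->bp + pRegs->di; break;  /* ditto */
        case 4:  uEff = pRegs->si; break;
        case 5:  uEff = pRegs->di; break;
        case 6:  uEff = pRegs->bp; break;              /* ditto */
        default: uEff = pRegs->bx; break;
    }

    /* mod=1 adds a sign-extended disp8, mod=2 a disp16. */
    if (iMod != 0)
        uEff += u16Disp;
    return uEff;   /* Wraps at 64KiB, matching 16-bit address arithmetic. */
}
#endif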
13792
13793
13794/** @} */
13795
13796
13797
13798/*
13799 * Include the instructions
13800 */
13801#include "IEMAllInstructions.cpp.h"
13802
13803
13804
13805
13806#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13807
13808/**
13809 * Sets up execution verification mode.
13810 */
13811IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
13812{
13813
13814 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
13815
13816 /*
13817 * Always note down the address of the current instruction.
13818 */
13819 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
13820 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
13821
13822 /*
13823 * Enable verification and/or logging.
13824 */
13825    bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
13826 if ( fNewNoRem
13827 && ( 0
13828#if 0 /* auto enable on first paged protected mode interrupt */
13829 || ( pOrgCtx->eflags.Bits.u1IF
13830 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
13831 && TRPMHasTrap(pVCpu)
13832 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
13833#endif
13834#if 0
13835              || ( pOrgCtx->cs.Sel == 0x10
13836                   && ( pOrgCtx->rip == 0x90119e3e
13837                        || pOrgCtx->rip == 0x901d9810) )
13838#endif
13839#if 0 /* Auto enable DSL - FPU stuff. */
13840              || ( pOrgCtx->cs.Sel == 0x10
13841 && (// pOrgCtx->rip == 0xc02ec07f
13842 //|| pOrgCtx->rip == 0xc02ec082
13843 //|| pOrgCtx->rip == 0xc02ec0c9
13844 0
13845 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
13846#endif
13847#if 0 /* Auto enable DSL - fstp st0 stuff. */
13848              || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
13849#endif
13850#if 0
13851 || pOrgCtx->rip == 0x9022bb3a
13852#endif
13853#if 0
13854 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
13855#endif
13856#if 0
13857 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
13858 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
13859#endif
13860#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
13861 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
13862 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
13863 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
13864#endif
13865#if 0 /* NT4SP1 - xadd early boot. */
13866 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
13867#endif
13868#if 0 /* NT4SP1 - wrmsr (intel MSR). */
13869 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
13870#endif
13871#if 0 /* NT4SP1 - cmpxchg (AMD). */
13872 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
13873#endif
13874#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
13875 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
13876#endif
13877#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
13878 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
13879
13880#endif
13881#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
13882 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
13883
13884#endif
13885#if 0 /* NT4SP1 - frstor [ecx] */
13886 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
13887#endif
13888#if 0 /* xxxxxx - All long mode code. */
13889 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
13890#endif
13891#if 0 /* rep movsq linux 3.7 64-bit boot. */
13892 || (pOrgCtx->rip == 0x0000000000100241)
13893#endif
13894#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
13895 || (pOrgCtx->rip == 0x000000000215e240)
13896#endif
13897#if 0 /* DOS's size-overridden iret to v8086. */
13898 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
13899#endif
13900 )
13901 )
13902 {
13903 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
13904 RTLogFlags(NULL, "enabled");
13905 fNewNoRem = false;
13906 }
13907 if (fNewNoRem != pVCpu->iem.s.fNoRem)
13908 {
13909 pVCpu->iem.s.fNoRem = fNewNoRem;
13910 if (!fNewNoRem)
13911 {
13912 LogAlways(("Enabling verification mode!\n"));
13913 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
13914 }
13915 else
13916 LogAlways(("Disabling verification mode!\n"));
13917 }
13918
13919 /*
13920 * Switch state.
13921 */
13922 if (IEM_VERIFICATION_ENABLED(pVCpu))
13923 {
13924 static CPUMCTX s_DebugCtx; /* Ugly! */
13925
13926 s_DebugCtx = *pOrgCtx;
13927 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
13928 }
13929
13930 /*
13931 * See if there is an interrupt pending in TRPM and inject it if we can.
13932 */
13933 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13934 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
13935#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13936 bool fIntrEnabled = pOrgCtx->hwvirt.Gif;
13937 if (fIntrEnabled)
13938 {
13939        if (CPUMIsGuestInSvmNestedHwVirtMode(pOrgCtx))
13940            fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pOrgCtx);
13941 else
13942 fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13943 }
13944#else
13945 bool fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13946#endif
13947 if ( fIntrEnabled
13948 && TRPMHasTrap(pVCpu)
13949 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
13950 {
13951 uint8_t u8TrapNo;
13952 TRPMEVENT enmType;
13953 RTGCUINT uErrCode;
13954 RTGCPTR uCr2;
13955 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13956 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13957 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13958 TRPMResetTrap(pVCpu);
13959 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
13960 }
13961
13962 /*
13963 * Reset the counters.
13964 */
13965 pVCpu->iem.s.cIOReads = 0;
13966 pVCpu->iem.s.cIOWrites = 0;
13967 pVCpu->iem.s.fIgnoreRaxRdx = false;
13968 pVCpu->iem.s.fOverlappingMovs = false;
13969 pVCpu->iem.s.fProblematicMemory = false;
13970 pVCpu->iem.s.fUndefinedEFlags = 0;
13971
13972 if (IEM_VERIFICATION_ENABLED(pVCpu))
13973 {
13974 /*
13975 * Free all verification records.
13976 */
13977 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
13978 pVCpu->iem.s.pIemEvtRecHead = NULL;
13979 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
13980 do
13981 {
13982 while (pEvtRec)
13983 {
13984 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
13985 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
13986 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
13987 pEvtRec = pNext;
13988 }
13989 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
13990 pVCpu->iem.s.pOtherEvtRecHead = NULL;
13991 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
13992 } while (pEvtRec);
13993 }
13994}
13995
13996
13997/**
13998 * Allocate an event record.
13999 * @returns Pointer to a record.
14000 */
14001IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
14002{
14003 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14004 return NULL;
14005
14006 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
14007 if (pEvtRec)
14008 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
14009 else
14010 {
14011 if (!pVCpu->iem.s.ppIemEvtRecNext)
14012 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
14013
14014 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
14015 if (!pEvtRec)
14016 return NULL;
14017 }
14018 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
14019 pEvtRec->pNext = NULL;
14020 return pEvtRec;
14021}
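/*
 * Note on the record lifecycle (summarizing the code in this file): records produced
 * by IEM itself (e.g. the fake I/O port accesses further down) are appended via
 * ppIemEvtRecNext, while the IEMNotify* callbacks append to the 'other' list via
 * ppOtherEvtRecNext.  iemExecVerificationModeCheck() later walks both lists and
 * compares them entry by entry, and iemExecVerificationModeSetup() recycles all
 * records onto pFreeEvtRec, which is the free list consulted above.
 */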
14022
14023
14024/**
14025 * IOMMMIORead notification.
14026 */
14027VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
14028{
14029 PVMCPU pVCpu = VMMGetCpu(pVM);
14030 if (!pVCpu)
14031 return;
14032 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14033 if (!pEvtRec)
14034 return;
14035 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
14036 pEvtRec->u.RamRead.GCPhys = GCPhys;
14037 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
14038 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14039 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14040}
14041
14042
14043/**
14044 * IOMMMIOWrite notification.
14045 */
14046VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
14047{
14048 PVMCPU pVCpu = VMMGetCpu(pVM);
14049 if (!pVCpu)
14050 return;
14051 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14052 if (!pEvtRec)
14053 return;
14054 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
14055 pEvtRec->u.RamWrite.GCPhys = GCPhys;
14056 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
14057 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
14058 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
14059 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
14060 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
14061 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14062 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14063}
14064
14065
14066/**
14067 * IOMIOPortRead notification.
14068 */
14069VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
14070{
14071 PVMCPU pVCpu = VMMGetCpu(pVM);
14072 if (!pVCpu)
14073 return;
14074 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14075 if (!pEvtRec)
14076 return;
14077 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14078 pEvtRec->u.IOPortRead.Port = Port;
14079 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14080 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14081 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14082}
14083
14084/**
14085 * IOMIOPortWrite notification.
14086 */
14087VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14088{
14089 PVMCPU pVCpu = VMMGetCpu(pVM);
14090 if (!pVCpu)
14091 return;
14092 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14093 if (!pEvtRec)
14094 return;
14095 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14096 pEvtRec->u.IOPortWrite.Port = Port;
14097 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14098 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14099 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14100 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14101}
14102
14103
14104VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
14105{
14106 PVMCPU pVCpu = VMMGetCpu(pVM);
14107 if (!pVCpu)
14108 return;
14109 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14110 if (!pEvtRec)
14111 return;
14112 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
14113 pEvtRec->u.IOPortStrRead.Port = Port;
14114 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
14115 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
14116 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14117 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14118}
14119
14120
14121VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
14122{
14123 PVMCPU pVCpu = VMMGetCpu(pVM);
14124 if (!pVCpu)
14125 return;
14126 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14127 if (!pEvtRec)
14128 return;
14129 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
14130 pEvtRec->u.IOPortStrWrite.Port = Port;
14131 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
14132 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
14133 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14134 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14135}
14136
14137
14138/**
14139 * Fakes and records an I/O port read.
14140 *
14141 * @returns VINF_SUCCESS.
14142 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14143 * @param Port The I/O port.
14144 * @param pu32Value Where to store the fake value.
14145 * @param cbValue The size of the access.
14146 */
14147IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14148{
14149 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14150 if (pEvtRec)
14151 {
14152 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14153 pEvtRec->u.IOPortRead.Port = Port;
14154 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14155 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14156 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14157 }
14158 pVCpu->iem.s.cIOReads++;
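    /* The 0xcc fill pattern is deliberately recognizable: iemVerifyWriteRecord() uses it
       to identify memory written by INS from a faked port read and skip the compare. */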
14159 *pu32Value = 0xcccccccc;
14160 return VINF_SUCCESS;
14161}
14162
14163
14164/**
14165 * Fakes and records an I/O port write.
14166 *
14167 * @returns VINF_SUCCESS.
14168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14169 * @param Port The I/O port.
14170 * @param u32Value The value being written.
14171 * @param cbValue The size of the access.
14172 */
14173IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14174{
14175 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14176 if (pEvtRec)
14177 {
14178 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14179 pEvtRec->u.IOPortWrite.Port = Port;
14180 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14181 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14182 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14183 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14184 }
14185 pVCpu->iem.s.cIOWrites++;
14186 return VINF_SUCCESS;
14187}
14188
14189
14190/**
14191 * Used by the verification assertions to add detailed register state and disassembly info to the message.
14192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14193 */
14194IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
14195{
14196 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14197 PVM pVM = pVCpu->CTX_SUFF(pVM);
14198
14199 char szRegs[4096];
14200 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
14201 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
14202 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
14203 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
14204 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
14205 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
14206 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
14207 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
14208 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
14209 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
14210 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
14211 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
14212 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
14213 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
14214 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
14215 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
14216 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
14217 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
14218 " efer=%016VR{efer}\n"
14219 " pat=%016VR{pat}\n"
14220 " sf_mask=%016VR{sf_mask}\n"
14221 "krnl_gs_base=%016VR{krnl_gs_base}\n"
14222 " lstar=%016VR{lstar}\n"
14223 " star=%016VR{star} cstar=%016VR{cstar}\n"
14224 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
14225 );
14226
14227 char szInstr1[256];
14228 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
14229 DBGF_DISAS_FLAGS_DEFAULT_MODE,
14230 szInstr1, sizeof(szInstr1), NULL);
14231 char szInstr2[256];
14232 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
14233 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14234 szInstr2, sizeof(szInstr2), NULL);
14235
14236 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
14237}
14238
14239
14240/**
14241 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
14242 * dump to the assertion info.
14243 *
14244 * @param pEvtRec The record to dump.
14245 */
14246IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
14247{
14248 switch (pEvtRec->enmEvent)
14249 {
14250 case IEMVERIFYEVENT_IOPORT_READ:
14251            RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
14252                            pEvtRec->u.IOPortRead.Port,
14253                            pEvtRec->u.IOPortRead.cbValue);
14254 break;
14255 case IEMVERIFYEVENT_IOPORT_WRITE:
14256 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
14257 pEvtRec->u.IOPortWrite.Port,
14258 pEvtRec->u.IOPortWrite.cbValue,
14259 pEvtRec->u.IOPortWrite.u32Value);
14260 break;
14261 case IEMVERIFYEVENT_IOPORT_STR_READ:
14262            RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
14263                            pEvtRec->u.IOPortStrRead.Port,
14264                            pEvtRec->u.IOPortStrRead.cbValue,
14265                            pEvtRec->u.IOPortStrRead.cTransfers);
14266 break;
14267 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14268 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
14269 pEvtRec->u.IOPortStrWrite.Port,
14270 pEvtRec->u.IOPortStrWrite.cbValue,
14271 pEvtRec->u.IOPortStrWrite.cTransfers);
14272 break;
14273 case IEMVERIFYEVENT_RAM_READ:
14274 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
14275 pEvtRec->u.RamRead.GCPhys,
14276 pEvtRec->u.RamRead.cb);
14277 break;
14278 case IEMVERIFYEVENT_RAM_WRITE:
14279 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
14280 pEvtRec->u.RamWrite.GCPhys,
14281 pEvtRec->u.RamWrite.cb,
14282 (int)pEvtRec->u.RamWrite.cb,
14283 pEvtRec->u.RamWrite.ab);
14284 break;
14285 default:
14286 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
14287 break;
14288 }
14289}
14290
14291
14292/**
14293 * Raises an assertion on the specified records, showing the given message with
14294 * the record dumps attached.
14295 *
14296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14297 * @param pEvtRec1 The first record.
14298 * @param pEvtRec2 The second record.
14299 * @param pszMsg The message explaining why we're asserting.
14300 */
14301IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
14302{
14303 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14304 iemVerifyAssertAddRecordDump(pEvtRec1);
14305 iemVerifyAssertAddRecordDump(pEvtRec2);
14306 iemVerifyAssertMsg2(pVCpu);
14307 RTAssertPanic();
14308}
14309
14310
14311/**
14312 * Raises an assertion on the specified record, showing the given message with
14313 * a record dump attached.
14314 *
14315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14316 * @param pEvtRec The record to dump.
14317 * @param pszMsg The message explaining why we're asserting.
14318 */
14319IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
14320{
14321 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14322 iemVerifyAssertAddRecordDump(pEvtRec);
14323 iemVerifyAssertMsg2(pVCpu);
14324 RTAssertPanic();
14325}
14326
14327
14328/**
14329 * Verifies a write record.
14330 *
14331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14332 * @param pEvtRec The write record.
14333 * @param fRem Set if REM did the other execution. If clear
14334 * it was HM.
14335 */
14336IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
14337{
14338 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
14339 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
14340 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
14341 if ( RT_FAILURE(rc)
14342 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
14343 {
14344 /* fend off ins */
14345 if ( !pVCpu->iem.s.cIOReads
14346 || pEvtRec->u.RamWrite.ab[0] != 0xcc
14347 || ( pEvtRec->u.RamWrite.cb != 1
14348 && pEvtRec->u.RamWrite.cb != 2
14349 && pEvtRec->u.RamWrite.cb != 4) )
14350 {
14351 /* fend off ROMs and MMIO */
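            /* The unsigned subtractions below act as range checks: writes within 0x60000 bytes
               of 0xa0000 (the legacy VGA/BIOS area) or within 0x40000 bytes of 0xfffc0000
               (the top 256KB below 4GB, typically firmware ROM) are not compared. */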
14352 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
14353 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
14354 {
14355 /* fend off fxsave */
14356 if (pEvtRec->u.RamWrite.cb != 512)
14357 {
14358 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
14359 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14360 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
14361 RTAssertMsg2Add("%s: %.*Rhxs\n"
14362 "iem: %.*Rhxs\n",
14363 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
14364 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
14365 iemVerifyAssertAddRecordDump(pEvtRec);
14366 iemVerifyAssertMsg2(pVCpu);
14367 RTAssertPanic();
14368 }
14369 }
14370 }
14371 }
14372
14373}
14374
14375/**
14376 * Performs the post-execution verification checks.
14377 */
14378IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
14379{
14380 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14381 return rcStrictIem;
14382
14383 /*
14384 * Switch back the state.
14385 */
14386 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
14387 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
14388 Assert(pOrgCtx != pDebugCtx);
14389 IEM_GET_CTX(pVCpu) = pOrgCtx;
14390
14391 /*
14392 * Execute the instruction in REM.
14393 */
14394 bool fRem = false;
14395 PVM pVM = pVCpu->CTX_SUFF(pVM);
14396
14397 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
14398#ifdef IEM_VERIFICATION_MODE_FULL_HM
14399 if ( HMIsEnabled(pVM)
14400 && pVCpu->iem.s.cIOReads == 0
14401 && pVCpu->iem.s.cIOWrites == 0
14402 && !pVCpu->iem.s.fProblematicMemory)
14403 {
14404 uint64_t uStartRip = pOrgCtx->rip;
14405 unsigned iLoops = 0;
14406 do
14407 {
14408 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
14409 iLoops++;
14410 } while ( rc == VINF_SUCCESS
14411 || ( rc == VINF_EM_DBG_STEPPED
14412 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14413 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
14414 || ( pOrgCtx->rip != pDebugCtx->rip
14415 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
14416 && iLoops < 8) );
14417 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
14418 rc = VINF_SUCCESS;
14419 }
14420#endif
14421 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
14422 || rc == VINF_IOM_R3_IOPORT_READ
14423 || rc == VINF_IOM_R3_IOPORT_WRITE
14424 || rc == VINF_IOM_R3_MMIO_READ
14425 || rc == VINF_IOM_R3_MMIO_READ_WRITE
14426 || rc == VINF_IOM_R3_MMIO_WRITE
14427 || rc == VINF_CPUM_R3_MSR_READ
14428 || rc == VINF_CPUM_R3_MSR_WRITE
14429 || rc == VINF_EM_RESCHEDULE
14430 )
14431 {
14432 EMRemLock(pVM);
14433 rc = REMR3EmulateInstruction(pVM, pVCpu);
14434 AssertRC(rc);
14435 EMRemUnlock(pVM);
14436 fRem = true;
14437 }
14438
14439# if 1 /* Skip unimplemented instructions for now. */
14440 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14441 {
14442 IEM_GET_CTX(pVCpu) = pOrgCtx;
14443 if (rc == VINF_EM_DBG_STEPPED)
14444 return VINF_SUCCESS;
14445 return rc;
14446 }
14447# endif
14448
14449 /*
14450 * Compare the register states.
14451 */
14452 unsigned cDiffs = 0;
14453 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
14454 {
14455 //Log(("REM and IEM ends up with different registers!\n"));
14456 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
14457
14458# define CHECK_FIELD(a_Field) \
14459 do \
14460 { \
14461 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14462 { \
14463 switch (sizeof(pOrgCtx->a_Field)) \
14464 { \
14465 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14466 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14467 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14468 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14469 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14470 } \
14471 cDiffs++; \
14472 } \
14473 } while (0)
14474# define CHECK_XSTATE_FIELD(a_Field) \
14475 do \
14476 { \
14477 if (pOrgXState->a_Field != pDebugXState->a_Field) \
14478 { \
14479 switch (sizeof(pOrgXState->a_Field)) \
14480 { \
14481 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14482 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14483 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14484 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14485 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14486 } \
14487 cDiffs++; \
14488 } \
14489 } while (0)
14490
14491# define CHECK_BIT_FIELD(a_Field) \
14492 do \
14493 { \
14494 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14495 { \
14496 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
14497 cDiffs++; \
14498 } \
14499 } while (0)
14500
14501# define CHECK_SEL(a_Sel) \
14502 do \
14503 { \
14504 CHECK_FIELD(a_Sel.Sel); \
14505 CHECK_FIELD(a_Sel.Attr.u); \
14506 CHECK_FIELD(a_Sel.u64Base); \
14507 CHECK_FIELD(a_Sel.u32Limit); \
14508 CHECK_FIELD(a_Sel.fFlags); \
14509 } while (0)
14510
14511 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
14512 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
14513
14514#if 1 /* The recompiler doesn't update these the intel way. */
14515 if (fRem)
14516 {
14517 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
14518 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
14519 pOrgXState->x87.CS = pDebugXState->x87.CS;
14520 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
14521 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
14522 pOrgXState->x87.DS = pDebugXState->x87.DS;
14523 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
14524 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
14525 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
14526 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
14527 }
14528#endif
14529 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
14530 {
14531 RTAssertMsg2Weak(" the FPU state differs\n");
14532 cDiffs++;
14533 CHECK_XSTATE_FIELD(x87.FCW);
14534 CHECK_XSTATE_FIELD(x87.FSW);
14535 CHECK_XSTATE_FIELD(x87.FTW);
14536 CHECK_XSTATE_FIELD(x87.FOP);
14537 CHECK_XSTATE_FIELD(x87.FPUIP);
14538 CHECK_XSTATE_FIELD(x87.CS);
14539 CHECK_XSTATE_FIELD(x87.Rsrvd1);
14540 CHECK_XSTATE_FIELD(x87.FPUDP);
14541 CHECK_XSTATE_FIELD(x87.DS);
14542 CHECK_XSTATE_FIELD(x87.Rsrvd2);
14543 CHECK_XSTATE_FIELD(x87.MXCSR);
14544 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
14545 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
14546 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
14547 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
14548 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
14549 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
14550 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
14551 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
14552 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
14553 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
14554 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
14555 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
14556 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
14557 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
14558 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
14559 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
14560 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
14561 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
14562 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
14563 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
14564 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
14565 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
14566 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
14567 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
14568 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
14569 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
14570 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
14571 }
14572 CHECK_FIELD(rip);
14573 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
14574 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
14575 {
14576 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
14577 CHECK_BIT_FIELD(rflags.Bits.u1CF);
14578 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
14579 CHECK_BIT_FIELD(rflags.Bits.u1PF);
14580 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
14581 CHECK_BIT_FIELD(rflags.Bits.u1AF);
14582 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
14583 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
14584 CHECK_BIT_FIELD(rflags.Bits.u1SF);
14585 CHECK_BIT_FIELD(rflags.Bits.u1TF);
14586 CHECK_BIT_FIELD(rflags.Bits.u1IF);
14587 CHECK_BIT_FIELD(rflags.Bits.u1DF);
14588 CHECK_BIT_FIELD(rflags.Bits.u1OF);
14589 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
14590 CHECK_BIT_FIELD(rflags.Bits.u1NT);
14591 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
14592        if (0 && !fRem) /** @todo debug the occasional clearing of the RF flag when running against VT-x. */
14593 CHECK_BIT_FIELD(rflags.Bits.u1RF);
14594 CHECK_BIT_FIELD(rflags.Bits.u1VM);
14595 CHECK_BIT_FIELD(rflags.Bits.u1AC);
14596 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
14597 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
14598 CHECK_BIT_FIELD(rflags.Bits.u1ID);
14599 }
14600
14601 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
14602 CHECK_FIELD(rax);
14603 CHECK_FIELD(rcx);
14604 if (!pVCpu->iem.s.fIgnoreRaxRdx)
14605 CHECK_FIELD(rdx);
14606 CHECK_FIELD(rbx);
14607 CHECK_FIELD(rsp);
14608 CHECK_FIELD(rbp);
14609 CHECK_FIELD(rsi);
14610 CHECK_FIELD(rdi);
14611 CHECK_FIELD(r8);
14612 CHECK_FIELD(r9);
14613 CHECK_FIELD(r10);
14614 CHECK_FIELD(r11);
14615 CHECK_FIELD(r12);
14616 CHECK_FIELD(r13);
14617 CHECK_SEL(cs);
14618 CHECK_SEL(ss);
14619 CHECK_SEL(ds);
14620 CHECK_SEL(es);
14621 CHECK_SEL(fs);
14622 CHECK_SEL(gs);
14623 CHECK_FIELD(cr0);
14624
14625    /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
14626       the faulting instruction first: 001b:77f61ff3 66 8b 42 02         mov ax, word [edx+002h] (NT4SP1) */
14627    /* Kludge #2: CR2 differs slightly on cross page boundary faults, we report the last address of the access
14628       while REM reports the address of the first byte on the page.  Pending investigation as to which is correct. */
14629 if (pOrgCtx->cr2 != pDebugCtx->cr2)
14630 {
14631 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
14632 { /* ignore */ }
14633 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
14634 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
14635 && fRem)
14636 { /* ignore */ }
14637 else
14638 CHECK_FIELD(cr2);
14639 }
14640 CHECK_FIELD(cr3);
14641 CHECK_FIELD(cr4);
14642 CHECK_FIELD(dr[0]);
14643 CHECK_FIELD(dr[1]);
14644 CHECK_FIELD(dr[2]);
14645 CHECK_FIELD(dr[3]);
14646 CHECK_FIELD(dr[6]);
14647 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
14648 CHECK_FIELD(dr[7]);
14649 CHECK_FIELD(gdtr.cbGdt);
14650 CHECK_FIELD(gdtr.pGdt);
14651 CHECK_FIELD(idtr.cbIdt);
14652 CHECK_FIELD(idtr.pIdt);
14653 CHECK_SEL(ldtr);
14654 CHECK_SEL(tr);
14655 CHECK_FIELD(SysEnter.cs);
14656 CHECK_FIELD(SysEnter.eip);
14657 CHECK_FIELD(SysEnter.esp);
14658 CHECK_FIELD(msrEFER);
14659 CHECK_FIELD(msrSTAR);
14660 CHECK_FIELD(msrPAT);
14661 CHECK_FIELD(msrLSTAR);
14662 CHECK_FIELD(msrCSTAR);
14663 CHECK_FIELD(msrSFMASK);
14664 CHECK_FIELD(msrKERNELGSBASE);
14665
14666 if (cDiffs != 0)
14667 {
14668 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14669 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
14670 RTAssertPanic();
14671 static bool volatile s_fEnterDebugger = true;
14672 if (s_fEnterDebugger)
14673 DBGFSTOP(pVM);
14674
14675# if 1 /* Ignore unimplemented instructions for now. */
14676 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14677 rcStrictIem = VINF_SUCCESS;
14678# endif
14679 }
14680# undef CHECK_FIELD
14681# undef CHECK_BIT_FIELD
14682 }
14683
14684 /*
14685 * If the register state compared fine, check the verification event
14686 * records.
14687 */
14688 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
14689 {
14690 /*
14691 * Compare verification event records.
14692 * - I/O port accesses should be a 1:1 match.
14693 */
14694 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
14695 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
14696 while (pIemRec && pOtherRec)
14697 {
14698            /* Since we might miss RAM writes and reads, ignore reads and verify
14699               any extra IEM write records against the actual guest memory. */
14700 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
14701 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
14702 && pIemRec->pNext)
14703 {
14704 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14705 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14706 pIemRec = pIemRec->pNext;
14707 }
14708
14709 /* Do the compare. */
14710 if (pIemRec->enmEvent != pOtherRec->enmEvent)
14711 {
14712 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
14713 break;
14714 }
14715 bool fEquals;
14716 switch (pIemRec->enmEvent)
14717 {
14718 case IEMVERIFYEVENT_IOPORT_READ:
14719 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
14720 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
14721 break;
14722 case IEMVERIFYEVENT_IOPORT_WRITE:
14723 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
14724 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
14725 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
14726 break;
14727 case IEMVERIFYEVENT_IOPORT_STR_READ:
14728 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
14729 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
14730 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
14731 break;
14732 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14733 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
14734 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
14735 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
14736 break;
14737 case IEMVERIFYEVENT_RAM_READ:
14738 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
14739 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
14740 break;
14741 case IEMVERIFYEVENT_RAM_WRITE:
14742 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
14743 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
14744 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
14745 break;
14746 default:
14747 fEquals = false;
14748 break;
14749 }
14750 if (!fEquals)
14751 {
14752 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
14753 break;
14754 }
14755
14756 /* advance */
14757 pIemRec = pIemRec->pNext;
14758 pOtherRec = pOtherRec->pNext;
14759 }
14760
14761 /* Ignore extra writes and reads. */
14762 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
14763 {
14764 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14765 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14766 pIemRec = pIemRec->pNext;
14767 }
14768 if (pIemRec != NULL)
14769 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
14770 else if (pOtherRec != NULL)
14771 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
14772 }
14773 IEM_GET_CTX(pVCpu) = pOrgCtx;
14774
14775 return rcStrictIem;
14776}
14777
14778#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14779
14780/* stubs */
14781IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14782{
14783 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
14784 return VERR_INTERNAL_ERROR;
14785}
14786
14787IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14788{
14789 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
14790 return VERR_INTERNAL_ERROR;
14791}
14792
14793#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14794
14795
14796#ifdef LOG_ENABLED
14797/**
14798 * Logs the current instruction.
14799 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14800 * @param pCtx The current CPU context.
14801 * @param fSameCtx Set if we have the same context information as the VMM,
14802 * clear if we may have already executed an instruction in
14803 * our debug context. When clear, we assume IEMCPU holds
14804 * valid CPU mode info.
14805 */
14806IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
14807{
14808# ifdef IN_RING3
14809 if (LogIs2Enabled())
14810 {
14811 char szInstr[256];
14812 uint32_t cbInstr = 0;
14813 if (fSameCtx)
14814 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14815 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14816 szInstr, sizeof(szInstr), &cbInstr);
14817 else
14818 {
14819 uint32_t fFlags = 0;
14820 switch (pVCpu->iem.s.enmCpuMode)
14821 {
14822 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14823 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14824 case IEMMODE_16BIT:
14825 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
14826 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14827 else
14828 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14829 break;
14830 }
14831 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
14832 szInstr, sizeof(szInstr), &cbInstr);
14833 }
14834
14835 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
14836 Log2(("****\n"
14837 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14838 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14839 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14840 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14841 " %s\n"
14842 ,
14843 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
14844 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
14845 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
14846 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
14847 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14848 szInstr));
14849
14850 if (LogIs3Enabled())
14851 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14852 }
14853 else
14854# endif
14855 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
14856 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
14857 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
14858}
14859#endif
14860
14861
14862/**
14863 * Makes status code adjustments (pass up from I/O and access handler)
14864 * as well as maintaining statistics.
14865 *
14866 * @returns Strict VBox status code to pass up.
14867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14868 * @param rcStrict The status from executing an instruction.
14869 */
14870DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14871{
14872 if (rcStrict != VINF_SUCCESS)
14873 {
14874 if (RT_SUCCESS(rcStrict))
14875 {
14876 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14877 || rcStrict == VINF_IOM_R3_IOPORT_READ
14878 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14879 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14880 || rcStrict == VINF_IOM_R3_MMIO_READ
14881 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14882 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14883 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14884 || rcStrict == VINF_CPUM_R3_MSR_READ
14885 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14886 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14887 || rcStrict == VINF_EM_RAW_TO_R3
14888 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
14889 || rcStrict == VINF_EM_TRIPLE_FAULT
14890 /* raw-mode / virt handlers only: */
14891 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14892 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14893 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14894 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14895 || rcStrict == VINF_SELM_SYNC_GDT
14896 || rcStrict == VINF_CSAM_PENDING_ACTION
14897 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14898 /* nested hw.virt codes: */
14899 || rcStrict == VINF_SVM_VMEXIT
14900 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14901/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
14902 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
14903#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14904 if ( rcStrict == VINF_SVM_VMEXIT
14905 && rcPassUp == VINF_SUCCESS)
14906 rcStrict = VINF_SUCCESS;
14907 else
14908#endif
14909 if (rcPassUp == VINF_SUCCESS)
14910 pVCpu->iem.s.cRetInfStatuses++;
14911 else if ( rcPassUp < VINF_EM_FIRST
14912 || rcPassUp > VINF_EM_LAST
14913 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14914 {
14915 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14916 pVCpu->iem.s.cRetPassUpStatus++;
14917 rcStrict = rcPassUp;
14918 }
14919 else
14920 {
14921 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14922 pVCpu->iem.s.cRetInfStatuses++;
14923 }
14924 }
14925 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14926 pVCpu->iem.s.cRetAspectNotImplemented++;
14927 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14928 pVCpu->iem.s.cRetInstrNotImplemented++;
14929#ifdef IEM_VERIFICATION_MODE_FULL
14930 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
14931 rcStrict = VINF_SUCCESS;
14932#endif
14933 else
14934 pVCpu->iem.s.cRetErrStatuses++;
14935 }
14936 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14937 {
14938 pVCpu->iem.s.cRetPassUpStatus++;
14939 rcStrict = pVCpu->iem.s.rcPassUp;
14940 }
14941
14942 return rcStrict;
14943}
14944
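/* Illustrative note, not part of the original source: the typical "pass up"
 * case is an instruction that itself finishes with VINF_SUCCESS while a
 * ring-0 or raw-mode access handler has recorded an informational code such
 * as VINF_IOM_R3_IOPORT_WRITE in pVCpu->iem.s.rcPassUp.  A minimal sketch,
 * assuming exactly those two inputs:
 *
 *   rcStrict = VINF_SUCCESS;                            // instruction done
 *   pVCpu->iem.s.rcPassUp = VINF_IOM_R3_IOPORT_WRITE;   // set by a handler
 *   rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
 *   // rcStrict is now VINF_IOM_R3_IOPORT_WRITE and cRetPassUpStatus has been
 *   // bumped, so the caller can drop to ring-3 and complete the port access.
 */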
14945
14946/**
14947 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14948 * IEMExecOneWithPrefetchedByPC.
14949 *
14950 * Similar code is found in IEMExecLots.
14951 *
14952 * @return Strict VBox status code.
14953 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14955 * @param fExecuteInhibit If set, execute the instruction following CLI,
14956 * POP SS and MOV SS,GR.
14957 */
14958DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
14959{
14960 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14961 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14962 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14963
14964#ifdef IEM_WITH_SETJMP
14965 VBOXSTRICTRC rcStrict;
14966 jmp_buf JmpBuf;
14967 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14968 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14969 if ((rcStrict = setjmp(JmpBuf)) == 0)
14970 {
14971 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14972 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14973 }
14974 else
14975 pVCpu->iem.s.cLongJumps++;
14976 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14977#else
14978 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14979 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14980#endif
14981 if (rcStrict == VINF_SUCCESS)
14982 pVCpu->iem.s.cInstructions++;
14983 if (pVCpu->iem.s.cActiveMappings > 0)
14984 {
14985 Assert(rcStrict != VINF_SUCCESS);
14986 iemMemRollback(pVCpu);
14987 }
14988 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14989 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14990 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14991
14992//#ifdef DEBUG
14993// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14994//#endif
14995
14996 /* Execute the next instruction as well if a cli, pop ss or
14997 mov ss, Gr has just completed successfully. */
14998 if ( fExecuteInhibit
14999 && rcStrict == VINF_SUCCESS
15000 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
15001 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
15002 {
15003 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
15004 if (rcStrict == VINF_SUCCESS)
15005 {
15006#ifdef LOG_ENABLED
15007 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
15008#endif
15009#ifdef IEM_WITH_SETJMP
15010 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15011 if ((rcStrict = setjmp(JmpBuf)) == 0)
15012 {
15013 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15014 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15015 }
15016 else
15017 pVCpu->iem.s.cLongJumps++;
15018 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15019#else
15020 IEM_OPCODE_GET_NEXT_U8(&b);
15021 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15022#endif
15023 if (rcStrict == VINF_SUCCESS)
15024 pVCpu->iem.s.cInstructions++;
15025 if (pVCpu->iem.s.cActiveMappings > 0)
15026 {
15027 Assert(rcStrict != VINF_SUCCESS);
15028 iemMemRollback(pVCpu);
15029 }
15030 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
15031 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
15032 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
15033 }
15034 else if (pVCpu->iem.s.cActiveMappings > 0)
15035 iemMemRollback(pVCpu);
15036 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
15037 }
15038
15039 /*
15040 * Return value fiddling, statistics and sanity assertions.
15041 */
15042 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15043
15044 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15045 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15046#if defined(IEM_VERIFICATION_MODE_FULL)
15047 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15048 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15049 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15050 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15051#endif
15052 return rcStrict;
15053}
15054
15055
15056#ifdef IN_RC
15057/**
15058 * Re-enters raw-mode or ensures we return to ring-3.
15059 *
15060 * @returns rcStrict, maybe modified.
15061 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15062 * @param pCtx The current CPU context.
15063 * @param rcStrict The status code returned by the interpreter.
15064 */
15065DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
15066{
15067 if ( !pVCpu->iem.s.fInPatchCode
15068 && ( rcStrict == VINF_SUCCESS
15069 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
15070 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
15071 {
15072 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
15073 CPUMRawEnter(pVCpu);
15074 else
15075 {
15076 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
15077 rcStrict = VINF_EM_RESCHEDULE;
15078 }
15079 }
15080 return rcStrict;
15081}
15082#endif
15083
15084
15085/**
15086 * Execute one instruction.
15087 *
15088 * @return Strict VBox status code.
15089 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15090 */
15091VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
15092{
15093#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15094 if (++pVCpu->iem.s.cVerifyDepth == 1)
15095 iemExecVerificationModeSetup(pVCpu);
15096#endif
15097#ifdef LOG_ENABLED
15098 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15099 iemLogCurInstr(pVCpu, pCtx, true);
15100#endif
15101
15102 /*
15103 * Do the decoding and emulation.
15104 */
15105 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15106 if (rcStrict == VINF_SUCCESS)
15107 rcStrict = iemExecOneInner(pVCpu, true);
15108 else if (pVCpu->iem.s.cActiveMappings > 0)
15109 iemMemRollback(pVCpu);
15110
15111#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15112 /*
15113 * Assert some sanity.
15114 */
15115 if (pVCpu->iem.s.cVerifyDepth == 1)
15116 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15117 pVCpu->iem.s.cVerifyDepth--;
15118#endif
15119#ifdef IN_RC
15120 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15121#endif
15122 if (rcStrict != VINF_SUCCESS)
15123 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15124 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15125 return rcStrict;
15126}
15127
15128
15129VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15130{
15131 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15132 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15133
15134 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15135 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15136 if (rcStrict == VINF_SUCCESS)
15137 {
15138 rcStrict = iemExecOneInner(pVCpu, true);
15139 if (pcbWritten)
15140 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15141 }
15142 else if (pVCpu->iem.s.cActiveMappings > 0)
15143 iemMemRollback(pVCpu);
15144
15145#ifdef IN_RC
15146 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15147#endif
15148 return rcStrict;
15149}
15150
15151
15152VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15153 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15154{
15155 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15156 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15157
15158 VBOXSTRICTRC rcStrict;
15159 if ( cbOpcodeBytes
15160 && pCtx->rip == OpcodeBytesPC)
15161 {
15162 iemInitDecoder(pVCpu, false);
15163#ifdef IEM_WITH_CODE_TLB
15164 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15165 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15166 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15167 pVCpu->iem.s.offCurInstrStart = 0;
15168 pVCpu->iem.s.offInstrNextByte = 0;
15169#else
15170 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15171 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15172#endif
15173 rcStrict = VINF_SUCCESS;
15174 }
15175 else
15176 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15177 if (rcStrict == VINF_SUCCESS)
15178 rcStrict = iemExecOneInner(pVCpu, true);
15179 else if (pVCpu->iem.s.cActiveMappings > 0)
15180 iemMemRollback(pVCpu);
15181
15182#ifdef IN_RC
15183 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15184#endif
15185 return rcStrict;
15186}
15187
15188
15189VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15190{
15191 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15192 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15193
15194 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15195 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15196 if (rcStrict == VINF_SUCCESS)
15197 {
15198 rcStrict = iemExecOneInner(pVCpu, false);
15199 if (pcbWritten)
15200 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15201 }
15202 else if (pVCpu->iem.s.cActiveMappings > 0)
15203 iemMemRollback(pVCpu);
15204
15205#ifdef IN_RC
15206 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15207#endif
15208 return rcStrict;
15209}
15210
15211
15212VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15213 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15214{
15215 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15216 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15217
15218 VBOXSTRICTRC rcStrict;
15219 if ( cbOpcodeBytes
15220 && pCtx->rip == OpcodeBytesPC)
15221 {
15222 iemInitDecoder(pVCpu, true);
15223#ifdef IEM_WITH_CODE_TLB
15224 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15225 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15226 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15227 pVCpu->iem.s.offCurInstrStart = 0;
15228 pVCpu->iem.s.offInstrNextByte = 0;
15229#else
15230 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15231 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15232#endif
15233 rcStrict = VINF_SUCCESS;
15234 }
15235 else
15236 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15237 if (rcStrict == VINF_SUCCESS)
15238 rcStrict = iemExecOneInner(pVCpu, false);
15239 else if (pVCpu->iem.s.cActiveMappings > 0)
15240 iemMemRollback(pVCpu);
15241
15242#ifdef IN_RC
15243 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15244#endif
15245 return rcStrict;
15246}
15247
15248
15249/**
15250 * For debugging DISGetParamSize, may come in handy.
15251 *
15252 * @returns Strict VBox status code.
15253 * @param pVCpu The cross context virtual CPU structure of the
15254 * calling EMT.
15255 * @param pCtxCore The context core structure.
15256 * @param OpcodeBytesPC The PC of the opcode bytes.
15257 * @param pvOpcodeBytes Prefetched opcode bytes.
15258 * @param cbOpcodeBytes Number of prefetched bytes.
15259 * @param pcbWritten Where to return the number of bytes written.
15260 * Optional.
15261 */
15262VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15263 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
15264 uint32_t *pcbWritten)
15265{
15266 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15267 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15268
15269 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15270 VBOXSTRICTRC rcStrict;
15271 if ( cbOpcodeBytes
15272 && pCtx->rip == OpcodeBytesPC)
15273 {
15274 iemInitDecoder(pVCpu, true);
15275#ifdef IEM_WITH_CODE_TLB
15276 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15277 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15278 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15279 pVCpu->iem.s.offCurInstrStart = 0;
15280 pVCpu->iem.s.offInstrNextByte = 0;
15281#else
15282 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15283 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15284#endif
15285 rcStrict = VINF_SUCCESS;
15286 }
15287 else
15288 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15289 if (rcStrict == VINF_SUCCESS)
15290 {
15291 rcStrict = iemExecOneInner(pVCpu, false);
15292 if (pcbWritten)
15293 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15294 }
15295 else if (pVCpu->iem.s.cActiveMappings > 0)
15296 iemMemRollback(pVCpu);
15297
15298#ifdef IN_RC
15299 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15300#endif
15301 return rcStrict;
15302}
15303
15304
15305VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
15306{
15307 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
15308
15309#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15310 /*
15311 * See if there is an interrupt pending in TRPM, inject it if we can.
15312 */
15313 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15314# ifdef IEM_VERIFICATION_MODE_FULL
15315 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15316# endif
15317
15318 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
15319# if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
15320 bool fIntrEnabled = pCtx->hwvirt.fGif;
15321 if (fIntrEnabled)
15322 {
15323 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15324 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
15325 else
15326 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15327 }
15328# else
15329 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15330# endif
15331 if ( fIntrEnabled
15332 && TRPMHasTrap(pVCpu)
15333 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15334 {
15335 uint8_t u8TrapNo;
15336 TRPMEVENT enmType;
15337 RTGCUINT uErrCode;
15338 RTGCPTR uCr2;
15339 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15340 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15341 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15342 TRPMResetTrap(pVCpu);
15343 }
15344
15345 /*
15346 * Log the state.
15347 */
15348# ifdef LOG_ENABLED
15349 iemLogCurInstr(pVCpu, pCtx, true);
15350# endif
15351
15352 /*
15353 * Do the decoding and emulation.
15354 */
15355 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15356 if (rcStrict == VINF_SUCCESS)
15357 rcStrict = iemExecOneInner(pVCpu, true);
15358 else if (pVCpu->iem.s.cActiveMappings > 0)
15359 iemMemRollback(pVCpu);
15360
15361 /*
15362 * Assert some sanity.
15363 */
15364 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15365
15366 /*
15367 * Log and return.
15368 */
15369 if (rcStrict != VINF_SUCCESS)
15370 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15371 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15372 if (pcInstructions)
15373 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15374 return rcStrict;
15375
15376#else /* Not verification mode */
15377
15378 /*
15379 * See if there is an interrupt pending in TRPM, inject it if we can.
15380 */
15381 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15382# ifdef IEM_VERIFICATION_MODE_FULL
15383 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15384# endif
15385
15386 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
15387# if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
15388 bool fIntrEnabled = pCtx->hwvirt.fGif;
15389 if (fIntrEnabled)
15390 {
15391 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15392 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
15393 else
15394 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15395 }
15396# else
15397 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15398# endif
15399 if ( fIntrEnabled
15400 && TRPMHasTrap(pVCpu)
15401 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15402 {
15403 uint8_t u8TrapNo;
15404 TRPMEVENT enmType;
15405 RTGCUINT uErrCode;
15406 RTGCPTR uCr2;
15407 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15408 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15409 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15410 TRPMResetTrap(pVCpu);
15411 }
15412
15413 /*
15414 * Initial decoder init w/ prefetch, then setup setjmp.
15415 */
15416 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15417 if (rcStrict == VINF_SUCCESS)
15418 {
15419# ifdef IEM_WITH_SETJMP
15420 jmp_buf JmpBuf;
15421 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
15422 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15423 pVCpu->iem.s.cActiveMappings = 0;
15424 if ((rcStrict = setjmp(JmpBuf)) == 0)
15425# endif
15426 {
15427 /*
15428 * The run loop. We limit ourselves to 4096 instructions right now.
15429 */
15430 PVM pVM = pVCpu->CTX_SUFF(pVM);
15431 uint32_t cInstr = 4096;
15432 for (;;)
15433 {
15434 /*
15435 * Log the state.
15436 */
15437# ifdef LOG_ENABLED
15438 iemLogCurInstr(pVCpu, pCtx, true);
15439# endif
15440
15441 /*
15442 * Do the decoding and emulation.
15443 */
15444 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15445 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15446 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15447 {
15448 Assert(pVCpu->iem.s.cActiveMappings == 0);
15449 pVCpu->iem.s.cInstructions++;
15450 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
15451 {
15452 uint32_t fCpu = pVCpu->fLocalForcedActions
15453 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
15454 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
15455 | VMCPU_FF_TLB_FLUSH
15456# ifdef VBOX_WITH_RAW_MODE
15457 | VMCPU_FF_TRPM_SYNC_IDT
15458 | VMCPU_FF_SELM_SYNC_TSS
15459 | VMCPU_FF_SELM_SYNC_GDT
15460 | VMCPU_FF_SELM_SYNC_LDT
15461# endif
15462 | VMCPU_FF_INHIBIT_INTERRUPTS
15463 | VMCPU_FF_BLOCK_NMIS
15464 | VMCPU_FF_UNHALT ));
15465
15466 if (RT_LIKELY( ( !fCpu
15467 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
15468 && !pCtx->rflags.Bits.u1IF) )
15469 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
15470 {
15471 if (cInstr-- > 0)
15472 {
15473 Assert(pVCpu->iem.s.cActiveMappings == 0);
15474 iemReInitDecoder(pVCpu);
15475 continue;
15476 }
15477 }
15478 }
15479 Assert(pVCpu->iem.s.cActiveMappings == 0);
15480 }
15481 else if (pVCpu->iem.s.cActiveMappings > 0)
15482 iemMemRollback(pVCpu);
15483 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15484 break;
15485 }
15486 }
15487# ifdef IEM_WITH_SETJMP
15488 else
15489 {
15490 if (pVCpu->iem.s.cActiveMappings > 0)
15491 iemMemRollback(pVCpu);
15492 pVCpu->iem.s.cLongJumps++;
15493 }
15494 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15495# endif
15496
15497 /*
15498 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
15499 */
15500 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15501 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15502# if defined(IEM_VERIFICATION_MODE_FULL)
15503 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15504 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15505 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15506 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15507# endif
15508 }
15509 else
15510 {
15511 if (pVCpu->iem.s.cActiveMappings > 0)
15512 iemMemRollback(pVCpu);
15513
15514# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15515 /*
15516 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
15517 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
15518 */
15519 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15520# endif
15521 }
15522
15523 /*
15524 * Maybe re-enter raw-mode and log.
15525 */
15526# ifdef IN_RC
15527 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15528# endif
15529 if (rcStrict != VINF_SUCCESS)
15530 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15531 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15532 if (pcInstructions)
15533 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15534 return rcStrict;
15535#endif /* Not verification mode */
15536}
15537
15538
15539
15540/**
15541 * Injects a trap, fault, abort, software interrupt or external interrupt.
15542 *
15543 * The parameter list matches TRPMQueryTrapAll pretty closely.
15544 *
15545 * @returns Strict VBox status code.
15546 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15547 * @param u8TrapNo The trap number.
15548 * @param enmType What type is it (trap/fault/abort), software
15549 * interrupt or hardware interrupt.
15550 * @param uErrCode The error code if applicable.
15551 * @param uCr2 The CR2 value if applicable.
15552 * @param cbInstr The instruction length (only relevant for
15553 * software interrupts).
15554 */
15555VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
15556 uint8_t cbInstr)
15557{
15558 iemInitDecoder(pVCpu, false);
15559#ifdef DBGFTRACE_ENABLED
15560 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
15561 u8TrapNo, enmType, uErrCode, uCr2);
15562#endif
15563
15564 uint32_t fFlags;
15565 switch (enmType)
15566 {
15567 case TRPM_HARDWARE_INT:
15568 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
15569 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
15570 uErrCode = uCr2 = 0;
15571 break;
15572
15573 case TRPM_SOFTWARE_INT:
15574 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
15575 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
15576 uErrCode = uCr2 = 0;
15577 break;
15578
15579 case TRPM_TRAP:
15580 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
15581 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
15582 if (u8TrapNo == X86_XCPT_PF)
15583 fFlags |= IEM_XCPT_FLAGS_CR2;
15584 switch (u8TrapNo)
15585 {
15586 case X86_XCPT_DF:
15587 case X86_XCPT_TS:
15588 case X86_XCPT_NP:
15589 case X86_XCPT_SS:
15590 case X86_XCPT_PF:
15591 case X86_XCPT_AC:
15592 fFlags |= IEM_XCPT_FLAGS_ERR;
15593 break;
15594
15595 case X86_XCPT_NMI:
15596 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
15597 break;
15598 }
15599 break;
15600
15601 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15602 }
15603
15604 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
15605
15606 if (pVCpu->iem.s.cActiveMappings > 0)
15607 iemMemRollback(pVCpu);
15608 return rcStrict;
15609}
15610
15611
15612/**
15613 * Injects the active TRPM event.
15614 *
15615 * @returns Strict VBox status code.
15616 * @param pVCpu The cross context virtual CPU structure.
15617 */
15618VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
15619{
15620#ifndef IEM_IMPLEMENTS_TASKSWITCH
15621 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
15622#else
15623 uint8_t u8TrapNo;
15624 TRPMEVENT enmType;
15625 RTGCUINT uErrCode;
15626 RTGCUINTPTR uCr2;
15627 uint8_t cbInstr;
15628 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
15629 if (RT_FAILURE(rc))
15630 return rc;
15631
15632 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
15633
15634 /** @todo Are there any other codes that imply the event was successfully
15635 * delivered to the guest? See @bugref{6607}. */
15636 if ( rcStrict == VINF_SUCCESS
15637 || rcStrict == VINF_IEM_RAISED_XCPT)
15638 {
15639 TRPMResetTrap(pVCpu);
15640 }
15641 return rcStrict;
15642#endif
15643}
15644
15645
15646VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
15647{
15648 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15649 return VERR_NOT_IMPLEMENTED;
15650}
15651
15652
15653VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
15654{
15655 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15656 return VERR_NOT_IMPLEMENTED;
15657}
15658
15659
15660#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
15661/**
15662 * Executes an IRET instruction with default operand size.
15663 *
15664 * This is for PATM.
15665 *
15666 * @returns VBox status code.
15667 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15668 * @param pCtxCore The register frame.
15669 */
15670VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
15671{
15672 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15673
15674 iemCtxCoreToCtx(pCtx, pCtxCore);
15675 iemInitDecoder(pVCpu);
15676 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
15677 if (rcStrict == VINF_SUCCESS)
15678 iemCtxToCtxCore(pCtxCore, pCtx);
15679 else
15680 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15681 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15682 return rcStrict;
15683}
15684#endif
15685
15686
15687/**
15688 * Macro used by the IEMExec* methods to check the given instruction length.
15689 *
15690 * Will return on failure!
15691 *
15692 * @param a_cbInstr The given instruction length.
15693 * @param a_cbMin The minimum length.
15694 */
15695#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15696 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15697 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
15698
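/* Illustrative note, not part of the original source: the single unsigned
 * comparison in the macro covers both bounds at once because values below
 * a_cbMin wrap around to huge unsigned numbers.  Worked out for a_cbMin = 3
 * (a value several of the interfaces below pass):
 *
 *   a_cbInstr = 2   ->  (unsigned)2 - 3 = 0xffffffff  >  15 - 3 = 12  -> fail
 *   a_cbInstr = 3   ->  3 - 3  =  0                   <= 12           -> ok
 *   a_cbInstr = 15  ->  15 - 3 = 12                   <= 12           -> ok
 *   a_cbInstr = 16  ->  16 - 3 = 13                   >  12           -> fail
 *
 * i.e. the same test rejects both too-short lengths and anything beyond the
 * 15 byte x86 instruction limit.
 */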
15699
15700/**
15701 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
15702 *
15703 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
15704 *
15705 * @returns Fiddled strict VBox status code, ready to return to the non-IEM caller.
15706 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15707 * @param rcStrict The status code to fiddle.
15708 */
15709DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15710{
15711 iemUninitExec(pVCpu);
15712#ifdef IN_RC
15713 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
15714 iemExecStatusCodeFiddling(pVCpu, rcStrict));
15715#else
15716 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15717#endif
15718}
15719
15720
15721/**
15722 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15723 *
15724 * This API ASSUMES that the caller has already verified that the guest code is
15725 * allowed to access the I/O port. (The I/O port is in the DX register in the
15726 * guest state.)
15727 *
15728 * @returns Strict VBox status code.
15729 * @param pVCpu The cross context virtual CPU structure.
15730 * @param cbValue The size of the I/O port access (1, 2, or 4).
15731 * @param enmAddrMode The addressing mode.
15732 * @param fRepPrefix Indicates whether a repeat prefix is used
15733 * (doesn't matter which for this instruction).
15734 * @param cbInstr The instruction length in bytes.
15735 * @param iEffSeg The effective segment register.
15736 * @param fIoChecked Whether the access to the I/O port has been
15737 * checked or not. It's typically checked in the
15738 * HM scenario.
15739 */
15740VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15741 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15742{
15743 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15744 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15745
15746 /*
15747 * State init.
15748 */
15749 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15750
15751 /*
15752 * Switch orgy for getting to the right handler.
15753 */
15754 VBOXSTRICTRC rcStrict;
15755 if (fRepPrefix)
15756 {
15757 switch (enmAddrMode)
15758 {
15759 case IEMMODE_16BIT:
15760 switch (cbValue)
15761 {
15762 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15763 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15764 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15765 default:
15766 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15767 }
15768 break;
15769
15770 case IEMMODE_32BIT:
15771 switch (cbValue)
15772 {
15773 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15774 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15775 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15776 default:
15777 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15778 }
15779 break;
15780
15781 case IEMMODE_64BIT:
15782 switch (cbValue)
15783 {
15784 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15785 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15786 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15787 default:
15788 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15789 }
15790 break;
15791
15792 default:
15793 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15794 }
15795 }
15796 else
15797 {
15798 switch (enmAddrMode)
15799 {
15800 case IEMMODE_16BIT:
15801 switch (cbValue)
15802 {
15803 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15804 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15805 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15806 default:
15807 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15808 }
15809 break;
15810
15811 case IEMMODE_32BIT:
15812 switch (cbValue)
15813 {
15814 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15815 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15816 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15817 default:
15818 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15819 }
15820 break;
15821
15822 case IEMMODE_64BIT:
15823 switch (cbValue)
15824 {
15825 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15826 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15827 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15828 default:
15829 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15830 }
15831 break;
15832
15833 default:
15834 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15835 }
15836 }
15837
15838 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15839}
15840
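/* Usage sketch, illustrative only; the concrete values are assumptions and
 * not taken from the original source.  An HM exit handler for a REP OUTSB
 * (32-bit address size, 2 byte instruction, DS segment, I/O access already
 * checked) could invoke the interface above roughly like this:
 *
 *   VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_32BIT,
 *                                                true, 2, X86_SREG_DS, true);
 *   // i.e. cbValue=1, fRepPrefix=true, cbInstr=2, iEffSeg=DS, fIoChecked=true;
 *   // the returned strict status is fed back into the caller's exit handling.
 */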
15841
15842/**
15843 * Interface for HM and EM for executing string I/O IN (read) instructions.
15844 *
15845 * This API ASSUMES that the caller has already verified that the guest code is
15846 * allowed to access the I/O port. (The I/O port is in the DX register in the
15847 * guest state.)
15848 *
15849 * @returns Strict VBox status code.
15850 * @param pVCpu The cross context virtual CPU structure.
15851 * @param cbValue The size of the I/O port access (1, 2, or 4).
15852 * @param enmAddrMode The addressing mode.
15853 * @param fRepPrefix Indicates whether a repeat prefix is used
15854 * (doesn't matter which for this instruction).
15855 * @param cbInstr The instruction length in bytes.
15856 * @param fIoChecked Whether the access to the I/O port has been
15857 * checked or not. It's typically checked in the
15858 * HM scenario.
15859 */
15860VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15861 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15862{
15863 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15864
15865 /*
15866 * State init.
15867 */
15868 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15869
15870 /*
15871 * Switch orgy for getting to the right handler.
15872 */
15873 VBOXSTRICTRC rcStrict;
15874 if (fRepPrefix)
15875 {
15876 switch (enmAddrMode)
15877 {
15878 case IEMMODE_16BIT:
15879 switch (cbValue)
15880 {
15881 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15882 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15883 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15884 default:
15885 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15886 }
15887 break;
15888
15889 case IEMMODE_32BIT:
15890 switch (cbValue)
15891 {
15892 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15893 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15894 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15895 default:
15896 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15897 }
15898 break;
15899
15900 case IEMMODE_64BIT:
15901 switch (cbValue)
15902 {
15903 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15904 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15905 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15906 default:
15907 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15908 }
15909 break;
15910
15911 default:
15912 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15913 }
15914 }
15915 else
15916 {
15917 switch (enmAddrMode)
15918 {
15919 case IEMMODE_16BIT:
15920 switch (cbValue)
15921 {
15922 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15923 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15924 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15925 default:
15926 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15927 }
15928 break;
15929
15930 case IEMMODE_32BIT:
15931 switch (cbValue)
15932 {
15933 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15934 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15935 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15936 default:
15937 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15938 }
15939 break;
15940
15941 case IEMMODE_64BIT:
15942 switch (cbValue)
15943 {
15944 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15945 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15946 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15947 default:
15948 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15949 }
15950 break;
15951
15952 default:
15953 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15954 }
15955 }
15956
15957 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15958}
15959
15960
15961/**
15962 * Interface for rawmode to execute an OUT instruction.
15963 *
15964 * @returns Strict VBox status code.
15965 * @param pVCpu The cross context virtual CPU structure.
15966 * @param cbInstr The instruction length in bytes.
15967 * @param u16Port The port to write to.
15968 * @param cbReg The register size.
15969 *
15970 * @remarks In ring-0 not all of the state needs to be synced in.
15971 */
15972VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15973{
15974 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15975 Assert(cbReg <= 4 && cbReg != 3);
15976
15977 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15978 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
15979 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15980}
15981
15982
15983/**
15984 * Interface for rawmode to execute an IN instruction.
15985 *
15986 * @returns Strict VBox status code.
15987 * @param pVCpu The cross context virtual CPU structure.
15988 * @param cbInstr The instruction length in bytes.
15989 * @param u16Port The port to read from.
15990 * @param cbReg The register size.
15991 */
15992VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15993{
15994 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15995 Assert(cbReg <= 4 && cbReg != 3);
15996
15997 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15998 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
15999 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16000}
16001
16002
16003/**
16004 * Interface for HM and EM to write to a CRx register.
16005 *
16006 * @returns Strict VBox status code.
16007 * @param pVCpu The cross context virtual CPU structure.
16008 * @param cbInstr The instruction length in bytes.
16009 * @param iCrReg The control register number (destination).
16010 * @param iGReg The general purpose register number (source).
16011 *
16012 * @remarks In ring-0 not all of the state needs to be synced in.
16013 */
16014VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
16015{
16016 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
16017 Assert(iCrReg < 16);
16018 Assert(iGReg < 16);
16019
16020 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16021 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
16022 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16023}
16024
16025
16026/**
16027 * Interface for HM and EM to read from a CRx register.
16028 *
16029 * @returns Strict VBox status code.
16030 * @param pVCpu The cross context virtual CPU structure.
16031 * @param cbInstr The instruction length in bytes.
16032 * @param iGReg The general purpose register number (destination).
16033 * @param iCrReg The control register number (source).
16034 *
16035 * @remarks In ring-0 not all of the state needs to be synced in.
16036 */
16037VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
16038{
16039 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
16040 Assert(iCrReg < 16);
16041 Assert(iGReg < 16);
16042
16043 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16044 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
16045 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16046}
16047
16048
16049/**
16050 * Interface for HM and EM to clear the CR0[TS] bit.
16051 *
16052 * @returns Strict VBox status code.
16053 * @param pVCpu The cross context virtual CPU structure.
16054 * @param cbInstr The instruction length in bytes.
16055 *
16056 * @remarks In ring-0 not all of the state needs to be synced in.
16057 */
16058VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
16059{
16060 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
16061
16062 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16063 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
16064 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16065}
16066
16067
16068/**
16069 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
16070 *
16071 * @returns Strict VBox status code.
16072 * @param pVCpu The cross context virtual CPU structure.
16073 * @param cbInstr The instruction length in bytes.
16074 * @param uValue The value to load into CR0.
16075 *
16076 * @remarks In ring-0 not all of the state needs to be synced in.
16077 */
16078VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
16079{
16080 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16081
16082 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16083 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
16084 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16085}
16086
16087
16088/**
16089 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
16090 *
16091 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
16092 *
16093 * @returns Strict VBox status code.
16094 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16095 * @param cbInstr The instruction length in bytes.
16096 * @remarks In ring-0 not all of the state needs to be synced in.
16097 * @thread EMT(pVCpu)
16098 */
16099VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
16100{
16101 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16102
16103 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16104 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
16105 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16106}
16107
16108
16109/**
16110 * Interface for HM and EM to emulate the INVLPG instruction.
16111 *
16112 * @param pVCpu The cross context virtual CPU structure.
16113 * @param cbInstr The instruction length in bytes.
16114 * @param GCPtrPage The effective address of the page to invalidate.
16115 *
16116 * @remarks In ring-0 not all of the state needs to be synced in.
16117 */
16118VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
16119{
16120 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16121
16122 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16123 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
16124 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16125}
16126
16127
16128/**
16129 * Interface for HM and EM to emulate the INVPCID instruction.
16130 *
16131 * @param pVCpu The cross context virtual CPU structure.
16132 * @param cbInstr The instruction length in bytes.
16133 * @param uType The invalidation type.
16134 * @param GCPtrInvpcidDesc The effective address of the INVPCID descriptor.
16135 *
16136 * @remarks In ring-0 not all of the state needs to be synced in.
16137 */
16138VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPU pVCpu, uint8_t cbInstr, uint8_t uType, RTGCPTR GCPtrInvpcidDesc)
16139{
16140 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
16141
16142 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16143 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_invpcid, uType, GCPtrInvpcidDesc);
16144 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16145}
16146
16147
16148/**
16149 * Checks if IEM is in the process of delivering an event (interrupt or
16150 * exception).
16151 *
16152 * @returns true if we're in the process of raising an interrupt or exception,
16153 * false otherwise.
16154 * @param pVCpu The cross context virtual CPU structure.
16155 * @param puVector Where to store the vector associated with the
16156 * currently delivered event, optional.
16157 * @param pfFlags Where to store the event delivery flags (see
16158 * IEM_XCPT_FLAGS_XXX), optional.
16159 * @param puErr Where to store the error code associated with the
16160 * event, optional.
16161 * @param puCr2 Where to store the CR2 associated with the event,
16162 * optional.
16163 * @remarks The caller should check the flags to determine if the error code and
16164 * CR2 are valid for the event.
16165 */
16166VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
16167{
16168 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
16169 if (fRaisingXcpt)
16170 {
16171 if (puVector)
16172 *puVector = pVCpu->iem.s.uCurXcpt;
16173 if (pfFlags)
16174 *pfFlags = pVCpu->iem.s.fCurXcpt;
16175 if (puErr)
16176 *puErr = pVCpu->iem.s.uCurXcptErr;
16177 if (puCr2)
16178 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
16179 }
16180 return fRaisingXcpt;
16181}
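
/* Usage sketch, illustrative only and not from the original source: a caller
 * in HM/EM that needs to know whether IEM is in the middle of delivering an
 * event could query it along these lines (the local variable names are just
 * for the example):
 *
 *   uint8_t  uVector;
 *   uint32_t fFlags, uErr;
 *   uint64_t uCr2;
 *   if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *   {
 *       Log(("Event %#x being delivered, flags=%#x\n", uVector, fFlags));
 *       if (fFlags & IEM_XCPT_FLAGS_ERR)
 *           Log(("  error code %#x\n", uErr));
 *       if (fFlags & IEM_XCPT_FLAGS_CR2)
 *           Log(("  cr2 %#RX64\n", uCr2));
 *   }
 */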
16182
16183#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
16184/**
16185 * Interface for HM and EM to emulate the CLGI instruction.
16186 *
16187 * @returns Strict VBox status code.
16188 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16189 * @param cbInstr The instruction length in bytes.
16190 * @thread EMT(pVCpu)
16191 */
16192VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
16193{
16194 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16195
16196 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16197 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
16198 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16199}
16200
16201
16202/**
16203 * Interface for HM and EM to emulate the STGI instruction.
16204 *
16205 * @returns Strict VBox status code.
16206 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16207 * @param cbInstr The instruction length in bytes.
16208 * @thread EMT(pVCpu)
16209 */
16210VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
16211{
16212 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16213
16214 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16215 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
16216 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16217}
16218
16219
16220/**
16221 * Interface for HM and EM to emulate the VMLOAD instruction.
16222 *
16223 * @returns Strict VBox status code.
16224 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16225 * @param cbInstr The instruction length in bytes.
16226 * @thread EMT(pVCpu)
16227 */
16228VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
16229{
16230 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16231
16232 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16233 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
16234 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16235}
16236
16237
16238/**
16239 * Interface for HM and EM to emulate the VMSAVE instruction.
16240 *
16241 * @returns Strict VBox status code.
16242 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16243 * @param cbInstr The instruction length in bytes.
16244 * @thread EMT(pVCpu)
16245 */
16246VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
16247{
16248 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16249
16250 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16251 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
16252 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16253}
16254
16255
16256/**
16257 * Interface for HM and EM to emulate the INVLPGA instruction.
16258 *
16259 * @returns Strict VBox status code.
16260 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16261 * @param cbInstr The instruction length in bytes.
16262 * @thread EMT(pVCpu)
16263 */
16264VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
16265{
16266 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16267
16268 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16269 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
16270 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16271}
16272
16273
16274/**
16275 * Interface for HM and EM to emulate the VMRUN instruction.
16276 *
16277 * @returns Strict VBox status code.
16278 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16279 * @param cbInstr The instruction length in bytes.
16280 * @thread EMT(pVCpu)
16281 */
16282VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
16283{
16284 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16285
16286 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16287 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
16288 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16289}
16290
16291
16292/**
16293 * Interface for HM and EM to emulate \#VMEXIT.
16294 *
16295 * @returns Strict VBox status code.
16296 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16297 * @param uExitCode The exit code.
16298 * @param uExitInfo1 The exit info. 1 field.
16299 * @param uExitInfo2 The exit info. 2 field.
16300 * @thread EMT(pVCpu)
16301 */
16302VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
16303{
16304 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, IEM_GET_CTX(pVCpu), uExitCode, uExitInfo1, uExitInfo2);
16305 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16306}
16307#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
16308
16309#ifdef IN_RING3
16310
16311/**
16312 * Handles the unlikely and probably fatal merge cases.
16313 *
16314 * @returns Merged status code.
16315 * @param rcStrict Current EM status code.
16316 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16317 * with @a rcStrict.
16318 * @param iMemMap The memory mapping index. For error reporting only.
16319 * @param pVCpu The cross context virtual CPU structure of the calling
16320 * thread, for error reporting only.
16321 */
16322DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16323 unsigned iMemMap, PVMCPU pVCpu)
16324{
16325 if (RT_FAILURE_NP(rcStrict))
16326 return rcStrict;
16327
16328 if (RT_FAILURE_NP(rcStrictCommit))
16329 return rcStrictCommit;
16330
16331 if (rcStrict == rcStrictCommit)
16332 return rcStrictCommit;
16333
16334 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16335 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16336 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16337 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16338 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16339 return VERR_IOM_FF_STATUS_IPE;
16340}
16341
16342
16343/**
16344 * Helper for IOMR3ProcessForceFlag.
16345 *
16346 * @returns Merged status code.
16347 * @param rcStrict Current EM status code.
16348 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16349 * with @a rcStrict.
16350 * @param iMemMap The memory mapping index. For error reporting only.
16351 * @param pVCpu The cross context virtual CPU structure of the calling
16352 * thread, for error reporting only.
16353 */
16354DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
16355{
16356 /* Simple. */
16357 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16358 return rcStrictCommit;
16359
16360 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16361 return rcStrict;
16362
16363 /* EM scheduling status codes. */
16364 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16365 && rcStrict <= VINF_EM_LAST))
16366 {
16367 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16368 && rcStrictCommit <= VINF_EM_LAST))
16369 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16370 }
16371
16372 /* Unlikely */
16373 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16374}
16375
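/* Illustrative note, not part of the original source: when both inputs are EM
 * scheduling codes, the merge above simply keeps the numerically smaller one.
 * A minimal sketch with hypothetical inputs:
 *
 *   VBOXSTRICTRC rcMerged = iemR3MergeStatus(VINF_EM_RESCHEDULE,
 *                                            VINF_EM_SUSPEND, 0, pVCpu);
 *   // rcMerged is whichever of the two has the lower numeric value, i.e. the
 *   // one the EM status ordering treats as the more important request;
 *   // combinations that are neither trivial nor two EM codes end up in
 *   // iemR3MergeStatusSlow above.
 */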
16376
16377/**
16378 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16379 *
16380 * @returns Merge between @a rcStrict and what the commit operation returned.
16381 * @param pVM The cross context VM structure.
16382 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16383 * @param rcStrict The status code returned by ring-0 or raw-mode.
16384 */
16385VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16386{
16387 /*
16388 * Reset the pending commit.
16389 */
16390 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16391 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16392 ("%#x %#x %#x\n",
16393 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16394 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16395
16396 /*
16397 * Commit the pending bounce buffers (usually just one).
16398 */
16399 unsigned cBufs = 0;
16400 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16401 while (iMemMap-- > 0)
16402 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16403 {
16404 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16405 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16406 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16407
16408 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16409 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16410 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16411
16412 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16413 {
16414 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16415 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16416 pbBuf,
16417 cbFirst,
16418 PGMACCESSORIGIN_IEM);
16419 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16420 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16421 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16422 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16423 }
16424
16425 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16426 {
16427 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16428 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16429 pbBuf + cbFirst,
16430 cbSecond,
16431 PGMACCESSORIGIN_IEM);
16432 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16433 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16434 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16435 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16436 }
16437 cBufs++;
16438 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16439 }
16440
16441 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16442 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16443 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16444 pVCpu->iem.s.cActiveMappings = 0;
16445 return rcStrict;
16446}
16447
16448#endif /* IN_RING3 */
16449